diff options
author | James Morse <james.morse@arm.com> | 2021-07-28 17:06:14 +0000 |
---|---|---|
committer | Borislav Petkov <bp@suse.de> | 2021-08-11 11:51:34 +0200 |
commit | 63c8b1231929b8aa80abc753c1c91b6b49e2c0b0 (patch) | |
tree | 697c959a7f6ddc242cc8a1ca2195ad0b4fee8db2 /arch/x86/kernel/cpu/resctrl/rdtgroup.c | |
parent | Linux 5.14-rc5 (diff) | |
download | linux-dev-63c8b1231929b8aa80abc753c1c91b6b49e2c0b0.tar.xz linux-dev-63c8b1231929b8aa80abc753c1c91b6b49e2c0b0.zip |
x86/resctrl: Split struct rdt_resource
resctrl is the de facto Linux ABI for SoC resource partitioning features.
To support it on another architecture, it needs to be abstracted from
the features provided by Intel RDT and AMD PQoS, and moved to /fs/.
struct rdt_resource contains a mix of architecture private details
and properties of the filesystem interface user-space uses.
Start by splitting struct rdt_resource, into an architecture private
'hw' struct, which contains the common resctrl structure that would be
used by any architecture. The foreach helpers are most commonly used by
the filesystem code, and should return the common resctrl structure.
for_each_rdt_resource() is changed to walk the common structure in its
parent arch private structure.
Move as much of the structure as possible into the common structure
in the core code's header file. The x86 hardware accessors remain
part of the architecture private code, as do num_closid, mon_scale
and mbm_width.
mon_scale and mbm_width are used to detect overflow of the hardware
counters, and convert them from their native size to bytes. Any
cross-architecture abstraction should be in terms of bytes, making
these properties private.
The hardware's num_closid is kept in the private structure to force the
filesystem code to use a helper to access it. MPAM would return a single
value for the system, regardless of the resource. Using the helper
prevents this field from being confused with the version of num_closid
that is being exposed to user-space (added in a later patch).
After this split, filesystem code touching a 'hw' struct indicates
where an abstraction is needed.
Splitting this structure only moves types around, and should not lead
to any change in behaviour.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Jamie Iles <jamie@nuviainc.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Link: https://lkml.kernel.org/r/20210728170637.25610-2-james.morse@arm.com
Diffstat (limited to '')
-rw-r--r-- | arch/x86/kernel/cpu/resctrl/rdtgroup.c | 68 |
1 files changed, 38 insertions, 30 deletions
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 01fd30e7829d..d49a00754d98 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -100,12 +100,15 @@ int closids_supported(void) static void closid_init(void) { + struct rdt_hw_resource *hw_res; struct rdt_resource *r; int rdt_min_closid = 32; /* Compute rdt_min_closid across all resources */ - for_each_alloc_enabled_rdt_resource(r) - rdt_min_closid = min(rdt_min_closid, r->num_closid); + for_each_alloc_enabled_rdt_resource(r) { + hw_res = resctrl_to_arch_res(r); + rdt_min_closid = min(rdt_min_closid, hw_res->num_closid); + } closid_free_map = BIT_MASK(rdt_min_closid) - 1; @@ -843,8 +846,10 @@ static int rdt_num_closids_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; + struct rdt_hw_resource *hw_res; - seq_printf(seq, "%d\n", r->num_closid); + hw_res = resctrl_to_arch_res(r); + seq_printf(seq, "%d\n", hw_res->num_closid); return 0; } @@ -1020,8 +1025,9 @@ static int max_threshold_occ_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); + seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale); return 0; } @@ -1042,7 +1048,7 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { - struct rdt_resource *r = of->kn->parent->priv; + struct rdt_hw_resource *hw_res; unsigned int bytes; int ret; @@ -1053,7 +1059,8 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, if (bytes > (boot_cpu_data.x86_cache_size * 1024)) return -EINVAL; - resctrl_cqm_threshold = bytes / r->mon_scale; + hw_res = resctrl_to_arch_res(of->kn->parent->priv); + 
resctrl_cqm_threshold = bytes / hw_res->mon_scale; return nbytes; } @@ -1111,16 +1118,16 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, switch (r->rid) { case RDT_RESOURCE_L3DATA: - _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE]; + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE].r_resctrl; break; case RDT_RESOURCE_L3CODE: - _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA]; + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA].r_resctrl; break; case RDT_RESOURCE_L2DATA: - _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE]; + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE].r_resctrl; break; case RDT_RESOURCE_L2CODE: - _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA]; + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA].r_resctrl; break; default: ret = -ENOENT; @@ -1867,7 +1874,7 @@ static void l2_qos_cfg_update(void *arg) static inline bool is_mba_linear(void) { - return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; + return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; } static int set_cache_qos_cfg(int level, bool enable) @@ -1888,7 +1895,7 @@ static int set_cache_qos_cfg(int level, bool enable) if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; - r_l = &rdt_resources_all[level]; + r_l = &rdt_resources_all[level].r_resctrl; list_for_each_entry(d, &r_l->domains, list) { if (r_l->cache.arch_has_per_cpu_cfg) /* Pick all the CPUs in the domain instance */ @@ -1917,10 +1924,10 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r) if (!r->alloc_capable) return; - if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) + if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA].r_resctrl) l2_qos_cfg_update(&r->alloc_enabled); - if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) + if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA].r_resctrl) l3_qos_cfg_update(&r->alloc_enabled); } @@ -1932,7 +1939,7 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r) */ static int set_mba_sc(bool mba_sc) { - struct 
rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; + struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; struct rdt_domain *d; if (!is_mbm_enabled() || !is_mba_linear() || @@ -1948,9 +1955,9 @@ static int set_mba_sc(bool mba_sc) static int cdp_enable(int level, int data_type, int code_type) { - struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; - struct rdt_resource *r_lcode = &rdt_resources_all[code_type]; - struct rdt_resource *r_l = &rdt_resources_all[level]; + struct rdt_resource *r_ldata = &rdt_resources_all[data_type].r_resctrl; + struct rdt_resource *r_lcode = &rdt_resources_all[code_type].r_resctrl; + struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; int ret; if (!r_l->alloc_capable || !r_ldata->alloc_capable || @@ -1980,13 +1987,13 @@ static int cdpl2_enable(void) static void cdp_disable(int level, int data_type, int code_type) { - struct rdt_resource *r = &rdt_resources_all[level]; + struct rdt_resource *r = &rdt_resources_all[level].r_resctrl; r->alloc_enabled = r->alloc_capable; - if (rdt_resources_all[data_type].alloc_enabled) { - rdt_resources_all[data_type].alloc_enabled = false; - rdt_resources_all[code_type].alloc_enabled = false; + if (rdt_resources_all[data_type].r_resctrl.alloc_enabled) { + rdt_resources_all[data_type].r_resctrl.alloc_enabled = false; + rdt_resources_all[code_type].r_resctrl.alloc_enabled = false; set_cache_qos_cfg(level, false); } } @@ -2003,9 +2010,9 @@ static void cdpl2_disable(void) static void cdp_disable_all(void) { - if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) + if (rdt_resources_all[RDT_RESOURCE_L3DATA].r_resctrl.alloc_enabled) cdpl3_disable(); - if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) + if (rdt_resources_all[RDT_RESOURCE_L2DATA].r_resctrl.alloc_enabled) cdpl2_disable(); } @@ -2153,7 +2160,7 @@ static int rdt_get_tree(struct fs_context *fc) static_branch_enable_cpuslocked(&rdt_enable_key); if (is_mbm_enabled()) { - r = 
&rdt_resources_all[RDT_RESOURCE_L3]; + r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; list_for_each_entry(dom, &r->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); } @@ -2257,6 +2264,7 @@ static int rdt_init_fs_context(struct fs_context *fc) static int reset_all_ctrls(struct rdt_resource *r) { + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct msr_param msr_param; cpumask_var_t cpu_mask; struct rdt_domain *d; @@ -2267,7 +2275,7 @@ static int reset_all_ctrls(struct rdt_resource *r) msr_param.res = r; msr_param.low = 0; - msr_param.high = r->num_closid; + msr_param.high = hw_res->num_closid; /* * Disable resource control for this resource by setting all @@ -2277,7 +2285,7 @@ static int reset_all_ctrls(struct rdt_resource *r) list_for_each_entry(d, &r->domains, list) { cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - for (i = 0; i < r->num_closid; i++) + for (i = 0; i < hw_res->num_closid; i++) d->ctrl_val[i] = r->default_ctrl; } cpu = get_cpu(); @@ -3124,13 +3132,13 @@ out: static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) { - if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) + if (rdt_resources_all[RDT_RESOURCE_L3DATA].r_resctrl.alloc_enabled) seq_puts(seq, ",cdp"); - if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) + if (rdt_resources_all[RDT_RESOURCE_L2DATA].r_resctrl.alloc_enabled) seq_puts(seq, ",cdpl2"); - if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) + if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) seq_puts(seq, ",mba_MBps"); return 0; |