diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 611c82306fdf1eb410246a381df93941426dd06b..55e0459b9a03d595dd39f6e4ccb399f11443fde0 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -79,7 +79,7 @@ struct rftype {
  * @capable:		Is this feature available on this machine
  * @name:		Name to use in "schemata" file
  * @num_closid:		Number of CLOSIDs available
- * @max_cbm:		Largest Cache Bit Mask allowed
+ * @default_ctrl:	Specifies default cache cbm or mem b/w percent.
  * @data_width:		Character width of data when displaying
  * @min_cbm_bits:	Minimum number of consecutive bits to be set
  *			in a cache bit mask
@@ -97,7 +97,7 @@ struct rdt_resource {
 	int			num_closid;
 	int			cbm_len;
 	int			min_cbm_bits;
-	u32			max_cbm;
+	u32			default_ctrl;
 	int			data_width;
 	struct list_head	domains;
 	int			msr_base;
@@ -111,17 +111,17 @@ struct rdt_resource {
  * @list:		all instances of this resource
  * @id:			unique id for this instance
  * @cpu_mask:		which cpus share this resource
- * @cbm:		array of cache bit masks (indexed by CLOSID)
- * @new_cbm:		new cbm value to be loaded
- * @have_new_cbm:	did user provide new_cbm for this domain
+ * @ctrl_val:		array of cache or mem ctrl values (indexed by CLOSID)
+ * @new_ctrl:		new ctrl value to be loaded
+ * @have_new_ctrl:	did user provide new_ctrl for this domain
  */
 struct rdt_domain {
 	struct list_head	list;
 	int			id;
 	struct cpumask		cpu_mask;
-	u32			*cbm;
-	u32			new_cbm;
-	bool			have_new_cbm;
+	u32			*ctrl_val;
+	u32			new_ctrl;
+	bool			have_new_ctrl;
 };
 
 /**
@@ -172,8 +172,8 @@ union cpuid_0x10_1_eax {
 	unsigned int full;
 };
 
-/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
-union cpuid_0x10_1_edx {
+/* CPUID.(EAX=10H, ECX=ResID).EDX */
+union cpuid_0x10_x_edx {
 	struct {
 		unsigned int cos_max:16;
 	} split;
@@ -182,7 +182,7 @@ union cpuid_0x10_1_edx {
 
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
 
-void rdt_cbm_update(void *arg);
+void rdt_ctrl_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index d2e5f92b5428cd7e0504bde6922be8bc3b10ca3f..92d8431fdc38863bd2216493a3e6a32a20a227eb 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -125,7 +125,7 @@ static inline bool cache_alloc_hsw_probe(void)
 
 		r->num_closid = 4;
 		r->cbm_len = 20;
-		r->max_cbm = max_cbm;
+		r->default_ctrl = max_cbm;
 		r->min_cbm_bits = 2;
 		r->capable = true;
 		r->enabled = true;
@@ -136,16 +136,16 @@ static inline bool cache_alloc_hsw_probe(void)
 	return false;
 }
 
-static void rdt_get_config(int idx, struct rdt_resource *r)
+static void rdt_get_cache_config(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
-	union cpuid_0x10_1_edx edx;
+	union cpuid_0x10_x_edx edx;
 	u32 ebx, ecx;
 
 	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
 	r->num_closid = edx.split.cos_max + 1;
 	r->cbm_len = eax.split.cbm_len + 1;
-	r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
 	r->data_width = (r->cbm_len + 3) / 4;
 	r->capable = true;
 	r->enabled = true;
@@ -158,7 +158,7 @@ static void rdt_get_cdp_l3_config(int type)
 
 	r->num_closid = r_l3->num_closid / 2;
 	r->cbm_len = r_l3->cbm_len;
-	r->max_cbm = r_l3->max_cbm;
+	r->default_ctrl = r_l3->default_ctrl;
 	r->data_width = (r->cbm_len + 3) / 4;
 	r->capable = true;
 	/*
@@ -181,7 +181,7 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
-void rdt_cbm_update(void *arg)
+void rdt_ctrl_update(void *arg)
 {
 	struct msr_param *m = (struct msr_param *)arg;
 	struct rdt_resource *r = m->res;
@@ -202,7 +202,7 @@ void rdt_cbm_update(void *arg)
 
 	for (i = m->low; i < m->high; i++) {
 		int idx = cbm_idx(r, i);
 
-		wrmsrl(r->msr_base + idx, d->cbm[i]);
+		wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
 	}
 }
@@ -275,8 +275,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
 	d->id = id;
 
-	d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
-	if (!d->cbm) {
+	d->ctrl_val = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+	if (!d->ctrl_val) {
 		kfree(d);
 		return;
 	}
@@ -284,8 +284,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	for (i = 0; i < r->num_closid; i++) {
 		int idx = cbm_idx(r, i);
 
-		d->cbm[i] = r->max_cbm;
-		wrmsrl(r->msr_base + idx, d->cbm[i]);
+		d->ctrl_val[i] = r->default_ctrl;
+		wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
 	}
 
 	cpumask_set_cpu(cpu, &d->cpu_mask);
@@ -305,7 +305,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_clear_cpu(cpu, &d->cpu_mask);
 	if (cpumask_empty(&d->cpu_mask)) {
-		kfree(d->cbm);
+		kfree(d->ctrl_val);
 		list_del(&d->list);
 		kfree(d);
 	}
@@ -383,7 +383,7 @@ static __init bool get_rdt_resources(void)
 		return false;
 
 	if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
-		rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+		rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
 		if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
 			rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
 			rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
@@ -392,7 +392,7 @@ static __init bool get_rdt_resources(void)
 	}
 	if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
 		/* CPUID 0x10.2 fields are same format at 0x10.1 */
-		rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+		rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
 		ret = true;
 	}
 	return ret;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 6870ebfcdcb3bb07830c40729612f9c2b1397ed5..380ee9d8ee6f82ebbc844c96ced6c6013d07ab6c 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -519,12 +519,12 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
 	return 0;
 }
 
-static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 			     struct seq_file *seq, void *v)
 {
 	struct rdt_resource *r = of->kn->parent->priv;
 
-	seq_printf(seq, "%x\n", r->max_cbm);
+	seq_printf(seq, "%x\n", r->default_ctrl);
 	return 0;
 }
 
@@ -551,7 +551,7 @@ static struct rftype res_info_files[] = {
 		.name		= "cbm_mask",
 		.mode		= 0444,
 		.kf_ops		= &rdtgroup_kf_single_ops,
-		.seq_show	= rdt_cbm_mask_show,
+		.seq_show	= rdt_default_ctrl_show,
 	},
 	{
 		.name		= "min_cbm_bits",
@@ -801,7 +801,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	return dentry;
 }
 
-static int reset_all_cbms(struct rdt_resource *r)
+static int reset_all_ctrls(struct rdt_resource *r)
 {
 	struct msr_param msr_param;
 	cpumask_var_t cpu_mask;
@@ -824,14 +824,14 @@ static int reset_all_cbms(struct rdt_resource *r)
 		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
 		for (i = 0; i < r->num_closid; i++)
-			d->cbm[i] = r->max_cbm;
+			d->ctrl_val[i] = r->default_ctrl;
 	}
 	cpu = get_cpu();
 	/* Update CBM on this cpu if it's in cpu_mask. */
 	if (cpumask_test_cpu(cpu, cpu_mask))
-		rdt_cbm_update(&msr_param);
+		rdt_ctrl_update(&msr_param);
 	/* Update CBM on all other cpus in cpu_mask. */
-	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
 	put_cpu();
 
 	free_cpumask_var(cpu_mask);
@@ -917,7 +917,7 @@ static void rdt_kill_sb(struct super_block *sb)
 
 	/*Put everything back to default values. */
 	for_each_enabled_rdt_resource(r)
-		reset_all_cbms(r);
+		reset_all_ctrls(r);
 	cdp_disable();
 	rmdir_all_sub();
 	static_branch_disable(&rdt_enable_key);
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index 8594db455aa1b18a462949e006eb6b6fe76d8429..7695179776ba0eb81594550e237625340af04f23 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -38,7 +38,7 @@ static bool cbm_validate(unsigned long var, struct rdt_resource *r)
 {
 	unsigned long first_bit, zero_bit;
 
-	if (var == 0 || var > r->max_cbm)
+	if (var == 0 || var > r->default_ctrl)
 		return false;
 
 	first_bit = find_first_bit(&var, r->cbm_len);
@@ -61,7 +61,7 @@ static int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
 	unsigned long data;
 	int ret;
 
-	if (d->have_new_cbm)
+	if (d->have_new_ctrl)
 		return -EINVAL;
 
 	ret = kstrtoul(buf, 16, &data);
@@ -69,8 +69,8 @@ static int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
 		return ret;
 	if (!cbm_validate(data, r))
 		return -EINVAL;
-	d->new_cbm = data;
-	d->have_new_cbm = true;
+	d->new_ctrl = data;
+	d->have_new_ctrl = true;
 
 	return 0;
 }
@@ -119,9 +119,9 @@ static int update_domains(struct rdt_resource *r, int closid)
 	msr_param.res = r;
 
 	list_for_each_entry(d, &r->domains, list) {
-		if (d->have_new_cbm && d->new_cbm != d->cbm[closid]) {
+		if (d->have_new_ctrl && d->new_ctrl != d->ctrl_val[closid]) {
 			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-			d->cbm[closid] = d->new_cbm;
+			d->ctrl_val[closid] = d->new_ctrl;
 		}
 	}
 	if (cpumask_empty(cpu_mask))
@@ -129,9 +129,9 @@ static int update_domains(struct rdt_resource *r, int closid)
 	cpu = get_cpu();
 	/* Update CBM on this cpu if it's in cpu_mask. */
 	if (cpumask_test_cpu(cpu, cpu_mask))
-		rdt_cbm_update(&msr_param);
+		rdt_ctrl_update(&msr_param);
 	/* Update CBM on other cpus. */
-	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
 	put_cpu();
 
 done:
@@ -164,7 +164,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 	for_each_enabled_rdt_resource(r)
 		list_for_each_entry(dom, &r->domains, list)
-			dom->have_new_cbm = false;
+			dom->have_new_ctrl = false;
 
 	while ((tok = strsep(&buf, "\n")) != NULL) {
 		resname = strsep(&tok, ":");
@@ -208,7 +208,7 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
 		if (sep)
 			seq_puts(s, ";");
 		seq_printf(s, "%d=%0*x", dom->id, max_data_width,
-			   dom->cbm[closid]);
+			   dom->ctrl_val[closid]);
 		sep = true;
 	}
 	seq_puts(s, "\n");
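
Reviewer note (not part of the patch): the net effect of the rename is that the per-domain
array and the MSR write path no longer assume the stored value is a cache bit mask. The
minimal sketch below shows the generic per-domain flow this patch establishes; it reuses
struct rdt_resource, struct rdt_domain, cbm_idx() and wrmsrl() from the hunks above, and
the "bandwidth percent" default in the comment is only a hypothetical example of a
non-CBM control value, not something introduced here.

/*
 * Sketch only: the same initialization works whether default_ctrl is a
 * full-length cache bit mask (CAT/CDP) or, hypothetically, a memory
 * bandwidth percentage such as 100 for a future bandwidth resource.
 */
static void init_domain_ctrl(struct rdt_resource *r, struct rdt_domain *d)
{
	int i;

	for (i = 0; i < r->num_closid; i++) {
		int idx = cbm_idx(r, i);

		/* Program every CLOSID in this domain to the resource default. */
		d->ctrl_val[i] = r->default_ctrl;
		wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
	}
}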