- A first round of changes towards splitting the arch-specific bits from
  the filesystem bits of resctrl, the ultimate goal being to support
  ARM's equivalent technology MPAM, with the same fs interface (James
  Morse)

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmEsrC8ACgkQEsHwGGHe
VUowsA//TblmI1t1kz6JUAneOVmZILJKRKyekrUsMr3twPpUcH1zMS4yE+ohE06X
IZZYh3jQokirOxQAmntbh2/uLz5AntldRDzNCEsBZerLz1kW502xaLYMCWg5wPbs
LWqacvHmOmQtXpB5fxA/jNIHxQyfKc1z/wWjTMwpU6K0P0usflx1UmSaiW7Kol6y
KY8B1V1DRYsCmtGQ+0Ww4Fye6TZ/w9jwwFolerSVqXy0I8TZNFITUCfmkFeSbAIp
uQMBXq5SGHn+Q9AsLB2xBhLmqkIb5482eC096t/UJPdFWcBnYdMsOmuLSxz6IuVF
z5gMctnsoIwdhtV7YSpTPIKopGkZKuyN4EIEwv3LVn2J9FatoeWDZu7kUyWr9WaQ
rp0u+09gxUBe68h9bNQkvUfSqi5RVSsijKzQ1vs5PpsTgo4lsPjtb4AjHr9bys/e
2GSMuhaLv+h/kNnte1PXnO5+8+8Yt0N16rVPw8aJE543o2jOm21LUPb/TuAHEmi8
uKn1iq3Dt9gzep92WMFXHwapbwkpF8NXqrcP/ibN97cXx3pt8wPL3yIqOl7rRsGI
BxkGDHP33aBEqbBuMFo9wW8P7whdo9ZhqjfLNmL+6HIM+JXXa0dC6sd2aD6RDrx7
+C0zffDWNfXiUWqC7bFURqRBj3bJ7oufLye3eSPHIMsssYIZYgQ=
=1WGp
-----END PGP SIGNATURE-----

Merge tag 'x86_cache_for_v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 resource control updates from Borislav Petkov:

 "A first round of changes towards splitting the arch-specific bits
  from the filesystem bits of resctrl, the ultimate goal being to
  support ARM's equivalent technology MPAM, with the same fs interface
  (James Morse)"

* tag 'x86_cache_for_v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  x86/resctrl: Make resctrl_arch_get_config() return its value
  x86/resctrl: Merge the CDP resources
  x86/resctrl: Expand resctrl_arch_update_domains()'s msr_param range
  x86/resctrl: Remove rdt_cdp_peer_get()
  x86/resctrl: Merge the ctrl_val arrays
  x86/resctrl: Calculate the index from the configuration type
  x86/resctrl: Apply offset correction when config is staged
  x86/resctrl: Make ctrlval arrays the same size
  x86/resctrl: Pass configuration type to resctrl_arch_get_config()
  x86/resctrl: Add a helper to read a closid's configuration
  x86/resctrl: Rename update_domains() to resctrl_arch_update_domains()
  x86/resctrl: Allow different CODE/DATA configurations to be staged
  x86/resctrl: Group staged configuration into a separate struct
  x86/resctrl: Move the schemata names into struct resctrl_schema
  x86/resctrl: Add a helper to read/set the CDP configuration
  x86/resctrl: Swizzle rdt_resource and resctrl_schema in pseudo_lock_region
  x86/resctrl: Pass the schema to resctrl filesystem functions
  x86/resctrl: Add resctrl_arch_get_num_closid()
  x86/resctrl: Store the effective num_closid in the schema
  x86/resctrl: Walk the resctrl schema list instead of an arch list
  ...
commit 42f6e869a0
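The whole series below follows one pattern: the filesystem-visible struct rdt_resource is embedded inside an arch-private struct rdt_hw_resource, and the x86 code recovers its wrapper with container_of(). A minimal, self-contained sketch of that pattern (the helper and member names are taken from the diff; the container_of macro and the main() scaffolding are illustrative assumptions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdt_resource {                   /* what the resctrl fs code sees */
            int rid;
            const char *name;
    };

    struct rdt_hw_resource {                /* x86-private wrapper */
            struct rdt_resource r_resctrl;
            unsigned int msr_base;          /* hidden from the fs code */
    };

    /* As in the diff: recover the arch wrapper from the shared struct. */
    static struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
    {
            return container_of(r, struct rdt_hw_resource, r_resctrl);
    }

    int main(void)
    {
            struct rdt_hw_resource hw = { { 0, "L3" }, 0xc90 };
            struct rdt_resource *r = &hw.r_resctrl;  /* fs code sees only this */

            printf("%s -> MSR base %#x\n", r->name,
                   resctrl_to_arch_res(r)->msr_base);
            return 0;
    }

An MPAM back-end can then supply its own wrapper around the same struct rdt_resource without any filesystem code changing.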
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -57,128 +57,57 @@ static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
               struct rdt_resource *r);
 
-#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
 
-struct rdt_resource rdt_resources_all[] = {
+struct rdt_hw_resource rdt_resources_all[] = {
         [RDT_RESOURCE_L3] =
         {
-                .rid = RDT_RESOURCE_L3,
-                .name = "L3",
-                .domains = domain_init(RDT_RESOURCE_L3),
+                .r_resctrl = {
+                        .rid = RDT_RESOURCE_L3,
+                        .name = "L3",
+                        .cache_level = 3,
+                        .cache = {
+                                .min_cbm_bits = 1,
+                        },
+                        .domains = domain_init(RDT_RESOURCE_L3),
+                        .parse_ctrlval = parse_cbm,
+                        .format_str = "%d=%0*x",
+                        .fflags = RFTYPE_RES_CACHE,
+                },
                 .msr_base = MSR_IA32_L3_CBM_BASE,
                 .msr_update = cat_wrmsr,
-                .cache_level = 3,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 1,
-                        .cbm_idx_offset = 0,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
         },
-        [RDT_RESOURCE_L3DATA] =
-        {
-                .rid = RDT_RESOURCE_L3DATA,
-                .name = "L3DATA",
-                .domains = domain_init(RDT_RESOURCE_L3DATA),
-                .msr_base = MSR_IA32_L3_CBM_BASE,
-                .msr_update = cat_wrmsr,
-                .cache_level = 3,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 2,
-                        .cbm_idx_offset = 0,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
-        },
-        [RDT_RESOURCE_L3CODE] =
-        {
-                .rid = RDT_RESOURCE_L3CODE,
-                .name = "L3CODE",
-                .domains = domain_init(RDT_RESOURCE_L3CODE),
-                .msr_base = MSR_IA32_L3_CBM_BASE,
-                .msr_update = cat_wrmsr,
-                .cache_level = 3,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 2,
-                        .cbm_idx_offset = 1,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
-        },
         [RDT_RESOURCE_L2] =
         {
-                .rid = RDT_RESOURCE_L2,
-                .name = "L2",
-                .domains = domain_init(RDT_RESOURCE_L2),
+                .r_resctrl = {
+                        .rid = RDT_RESOURCE_L2,
+                        .name = "L2",
+                        .cache_level = 2,
+                        .cache = {
+                                .min_cbm_bits = 1,
+                        },
+                        .domains = domain_init(RDT_RESOURCE_L2),
+                        .parse_ctrlval = parse_cbm,
+                        .format_str = "%d=%0*x",
+                        .fflags = RFTYPE_RES_CACHE,
+                },
                 .msr_base = MSR_IA32_L2_CBM_BASE,
                 .msr_update = cat_wrmsr,
-                .cache_level = 2,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 1,
-                        .cbm_idx_offset = 0,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
         },
-        [RDT_RESOURCE_L2DATA] =
-        {
-                .rid = RDT_RESOURCE_L2DATA,
-                .name = "L2DATA",
-                .domains = domain_init(RDT_RESOURCE_L2DATA),
-                .msr_base = MSR_IA32_L2_CBM_BASE,
-                .msr_update = cat_wrmsr,
-                .cache_level = 2,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 2,
-                        .cbm_idx_offset = 0,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
-        },
-        [RDT_RESOURCE_L2CODE] =
-        {
-                .rid = RDT_RESOURCE_L2CODE,
-                .name = "L2CODE",
-                .domains = domain_init(RDT_RESOURCE_L2CODE),
-                .msr_base = MSR_IA32_L2_CBM_BASE,
-                .msr_update = cat_wrmsr,
-                .cache_level = 2,
-                .cache = {
-                        .min_cbm_bits = 1,
-                        .cbm_idx_mult = 2,
-                        .cbm_idx_offset = 1,
-                },
-                .parse_ctrlval = parse_cbm,
-                .format_str = "%d=%0*x",
-                .fflags = RFTYPE_RES_CACHE,
-        },
         [RDT_RESOURCE_MBA] =
         {
-                .rid = RDT_RESOURCE_MBA,
-                .name = "MB",
-                .domains = domain_init(RDT_RESOURCE_MBA),
-                .cache_level = 3,
-                .parse_ctrlval = parse_bw,
-                .format_str = "%d=%*u",
-                .fflags = RFTYPE_RES_MB,
+                .r_resctrl = {
+                        .rid = RDT_RESOURCE_MBA,
+                        .name = "MB",
+                        .cache_level = 3,
+                        .domains = domain_init(RDT_RESOURCE_MBA),
+                        .parse_ctrlval = parse_bw,
+                        .format_str = "%d=%*u",
+                        .fflags = RFTYPE_RES_MB,
+                },
         },
 };
 
-static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
-{
-        return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
-}
-
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
@@ -199,7 +128,8 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  */
 static inline void cache_alloc_hsw_probe(void)
 {
-        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+        struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
+        struct rdt_resource *r = &hw_res->r_resctrl;
         u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
         if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
@@ -211,7 +141,7 @@ static inline void cache_alloc_hsw_probe(void)
         if (l != max_cbm)
                 return;
 
-        r->num_closid = 4;
+        hw_res->num_closid = 4;
         r->default_ctrl = max_cbm;
         r->cache.cbm_len = 20;
         r->cache.shareable_bits = 0xc0000;
@@ -225,7 +155,7 @@ static inline void cache_alloc_hsw_probe(void)
 bool is_mba_sc(struct rdt_resource *r)
 {
         if (!r)
-                return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
+                return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
 
         return r->membw.mba_sc;
 }
@@ -253,12 +183,13 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 
 static bool __get_mem_config_intel(struct rdt_resource *r)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
         union cpuid_0x10_3_eax eax;
         union cpuid_0x10_x_edx edx;
         u32 ebx, ecx, max_delay;
 
         cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
-        r->num_closid = edx.split.cos_max + 1;
+        hw_res->num_closid = edx.split.cos_max + 1;
         max_delay = eax.split.max_delay + 1;
         r->default_ctrl = MAX_MBA_BW;
         r->membw.arch_needs_linear = true;
@@ -287,12 +218,13 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 
 static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
         union cpuid_0x10_3_eax eax;
         union cpuid_0x10_x_edx edx;
         u32 ebx, ecx;
 
         cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
-        r->num_closid = edx.split.cos_max + 1;
+        hw_res->num_closid = edx.split.cos_max + 1;
         r->default_ctrl = MAX_MBA_BW_AMD;
 
         /* AMD does not use delay */
@@ -317,12 +249,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
         union cpuid_0x10_1_eax eax;
         union cpuid_0x10_x_edx edx;
         u32 ebx, ecx;
 
         cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
-        r->num_closid = edx.split.cos_max + 1;
+        hw_res->num_closid = edx.split.cos_max + 1;
         r->cache.cbm_len = eax.split.cbm_len + 1;
         r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
         r->cache.shareable_bits = ebx & r->default_ctrl;
@@ -331,43 +264,35 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
         r->alloc_enabled = true;
 }
 
-static void rdt_get_cdp_config(int level, int type)
+static void rdt_get_cdp_config(int level)
 {
-        struct rdt_resource *r_l = &rdt_resources_all[level];
-        struct rdt_resource *r = &rdt_resources_all[type];
-
-        r->num_closid = r_l->num_closid / 2;
-        r->cache.cbm_len = r_l->cache.cbm_len;
-        r->default_ctrl = r_l->default_ctrl;
-        r->cache.shareable_bits = r_l->cache.shareable_bits;
-        r->data_width = (r->cache.cbm_len + 3) / 4;
-        r->alloc_capable = true;
         /*
          * By default, CDP is disabled. CDP can be enabled by mount parameter
          * "cdp" during resctrl file system mount time.
         */
-        r->alloc_enabled = false;
+        rdt_resources_all[level].cdp_enabled = false;
+        rdt_resources_all[level].r_resctrl.cdp_capable = true;
 }
 
 static void rdt_get_cdp_l3_config(void)
 {
-        rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
-        rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+        rdt_get_cdp_config(RDT_RESOURCE_L3);
 }
 
 static void rdt_get_cdp_l2_config(void)
 {
-        rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
-        rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+        rdt_get_cdp_config(RDT_RESOURCE_L2);
 }
 
 static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
         unsigned int i;
+        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
         for (i = m->low; i < m->high; i++)
-                wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+                wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 /*
@@ -389,19 +314,23 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                 struct rdt_resource *r)
 {
         unsigned int i;
+        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
         /* Write the delay values for mba. */
         for (i = m->low; i < m->high; i++)
-                wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
+                wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
 }
 
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
         unsigned int i;
+        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
         for (i = m->low; i < m->high; i++)
-                wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
+                wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
@@ -417,16 +346,22 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
         return NULL;
 }
 
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
+{
+        return resctrl_to_arch_res(r)->num_closid;
+}
+
 void rdt_ctrl_update(void *arg)
 {
         struct msr_param *m = arg;
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
         struct rdt_resource *r = m->res;
         int cpu = smp_processor_id();
         struct rdt_domain *d;
 
         d = get_domain_from_cpu(cpu, r);
         if (d) {
-                r->msr_update(d, m, r);
+                hw_res->msr_update(d, m, r);
                 return;
         }
         pr_warn_once("cpu %d not found in any domain for resource %s\n",
@@ -468,6 +403,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 
 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
         int i;
 
         /*
@@ -476,7 +412,7 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
          * For Memory Allocation: Set b/w requested to 100%
          * and the bandwidth in MBps to U32_MAX
          */
-        for (i = 0; i < r->num_closid; i++, dc++, dm++) {
+        for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
                 *dc = r->default_ctrl;
                 *dm = MBA_MAX_MBPS;
         }
@@ -484,26 +420,30 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
         struct msr_param m;
         u32 *dc, *dm;
 
-        dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+        dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
+                           GFP_KERNEL);
         if (!dc)
                 return -ENOMEM;
 
-        dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
+        dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
                           GFP_KERNEL);
         if (!dm) {
                 kfree(dc);
                 return -ENOMEM;
         }
 
-        d->ctrl_val = dc;
-        d->mbps_val = dm;
+        hw_dom->ctrl_val = dc;
+        hw_dom->mbps_val = dm;
         setup_default_ctrlval(r, dc, dm);
 
         m.low = 0;
-        m.high = r->num_closid;
-        r->msr_update(d, &m, r);
+        m.high = hw_res->num_closid;
+        hw_res->msr_update(d, &m, r);
         return 0;
 }
 
@@ -560,6 +500,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 {
         int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
         struct list_head *add_pos = NULL;
+        struct rdt_hw_domain *hw_dom;
         struct rdt_domain *d;
 
         d = rdt_find_domain(r, id, &add_pos);
@@ -575,10 +516,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                 return;
         }
 
-        d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
-        if (!d)
+        hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
+        if (!hw_dom)
                 return;
 
+        d = &hw_dom->d_resctrl;
         d->id = id;
         cpumask_set_cpu(cpu, &d->cpu_mask);
 
@@ -607,6 +549,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 {
         int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+        struct rdt_hw_domain *hw_dom;
         struct rdt_domain *d;
 
         d = rdt_find_domain(r, id, NULL);
@@ -614,6 +557,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                 pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                 return;
         }
+        hw_dom = resctrl_to_arch_dom(d);
 
         cpumask_clear_cpu(cpu, &d->cpu_mask);
         if (cpumask_empty(&d->cpu_mask)) {
@@ -646,16 +590,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                 if (d->plr)
                         d->plr->d = NULL;
 
-                kfree(d->ctrl_val);
-                kfree(d->mbps_val);
+                kfree(hw_dom->ctrl_val);
+                kfree(hw_dom->mbps_val);
                 bitmap_free(d->rmid_busy_llc);
                 kfree(d->mbm_total);
                 kfree(d->mbm_local);
-                kfree(d);
+                kfree(hw_dom);
                 return;
         }
 
-        if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
+        if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
                 if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                         cancel_delayed_work(&d->mbm_over);
                         mbm_setup_overflow_handler(d, 0);
@@ -732,13 +676,8 @@ static int resctrl_offline_cpu(unsigned int cpu)
 static __init void rdt_init_padding(void)
 {
         struct rdt_resource *r;
-        int cl;
 
         for_each_alloc_capable_rdt_resource(r) {
-                cl = strlen(r->name);
-                if (cl > max_name_width)
-                        max_name_width = cl;
-
                 if (r->data_width > max_data_width)
                         max_data_width = r->data_width;
         }
@@ -827,19 +766,22 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
+        struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
+
         if (!rdt_cpu_has(X86_FEATURE_MBA))
                 return false;
 
         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+                return __get_mem_config_intel(&hw_res->r_resctrl);
         else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-                return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+                return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
 
         return false;
 }
 
 static __init bool get_rdt_alloc_resources(void)
 {
+        struct rdt_resource *r;
         bool ret = false;
 
         if (rdt_alloc_capable)
@@ -849,14 +791,16 @@ static __init bool get_rdt_alloc_resources(void)
                 return false;
 
         if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
-                rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+                r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+                rdt_get_cache_alloc_cfg(1, r);
                 if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                         rdt_get_cdp_l3_config();
                 ret = true;
         }
         if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                 /* CPUID 0x10.2 fields are same format at 0x10.1 */
-                rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+                r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
+                rdt_get_cache_alloc_cfg(2, r);
                 if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                         rdt_get_cdp_l2_config();
                 ret = true;
@@ -870,6 +814,8 @@ static __init bool get_rdt_alloc_resources(void)
 
 static __init bool get_rdt_mon_resources(void)
 {
+        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
         if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                 rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
         if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
@@ -880,7 +826,7 @@ static __init bool get_rdt_mon_resources(void)
         if (!rdt_mon_features)
                 return false;
 
-        return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
+        return !rdt_get_mon_l3_config(r);
 }
 
 static __init void __check_quirks_intel(void)
@@ -918,42 +864,40 @@ static __init bool get_rdt_resources(void)
 
 static __init void rdt_init_res_defs_intel(void)
 {
+        struct rdt_hw_resource *hw_res;
         struct rdt_resource *r;
 
         for_each_rdt_resource(r) {
+                hw_res = resctrl_to_arch_res(r);
+
                 if (r->rid == RDT_RESOURCE_L3 ||
-                    r->rid == RDT_RESOURCE_L3DATA ||
-                    r->rid == RDT_RESOURCE_L3CODE ||
-                    r->rid == RDT_RESOURCE_L2 ||
-                    r->rid == RDT_RESOURCE_L2DATA ||
-                    r->rid == RDT_RESOURCE_L2CODE) {
+                    r->rid == RDT_RESOURCE_L2) {
                         r->cache.arch_has_sparse_bitmaps = false;
                         r->cache.arch_has_empty_bitmaps = false;
                         r->cache.arch_has_per_cpu_cfg = false;
                 } else if (r->rid == RDT_RESOURCE_MBA) {
-                        r->msr_base = MSR_IA32_MBA_THRTL_BASE;
-                        r->msr_update = mba_wrmsr_intel;
+                        hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
+                        hw_res->msr_update = mba_wrmsr_intel;
                 }
         }
 }
 
 static __init void rdt_init_res_defs_amd(void)
 {
+        struct rdt_hw_resource *hw_res;
         struct rdt_resource *r;
 
         for_each_rdt_resource(r) {
+                hw_res = resctrl_to_arch_res(r);
+
                 if (r->rid == RDT_RESOURCE_L3 ||
-                    r->rid == RDT_RESOURCE_L3DATA ||
-                    r->rid == RDT_RESOURCE_L3CODE ||
-                    r->rid == RDT_RESOURCE_L2 ||
-                    r->rid == RDT_RESOURCE_L2DATA ||
-                    r->rid == RDT_RESOURCE_L2CODE) {
+                    r->rid == RDT_RESOURCE_L2) {
                         r->cache.arch_has_sparse_bitmaps = true;
                         r->cache.arch_has_empty_bitmaps = true;
                         r->cache.arch_has_per_cpu_cfg = true;
                 } else if (r->rid == RDT_RESOURCE_MBA) {
-                        r->msr_base = MSR_IA32_MBA_BW_BASE;
-                        r->msr_update = mba_wrmsr_amd;
+                        hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
+                        hw_res->msr_update = mba_wrmsr_amd;
                 }
         }
 }
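Note how the deleted cbm_idx() and the per-CODE/DATA resource entries above are replaced: the CDP index math moves into get_config_index() in the ctrlmondata.c hunks below. A quick check that the two schemes agree (the multiplier and offset values are copied from the deleted initializer table; the standalone demo functions are illustrative assumptions):

    /* Old scheme (deleted above): per-resource multiplier and offset.
     * L3DATA used mult = 2, offset = 0; L3CODE used mult = 2, offset = 1. */
    static unsigned int cbm_idx(unsigned int mult, unsigned int offset,
                                unsigned int closid)
    {
            return closid * mult + offset;
    }

    /* New scheme (added below): derive the MSR index from the
     * configuration type of the single, merged resource. */
    enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

    static unsigned int get_config_index(unsigned int closid,
                                         enum resctrl_conf_type type)
    {
            switch (type) {
            case CDP_CODE:
                    return closid * 2 + 1;  /* was mult = 2, offset = 1 */
            case CDP_DATA:
                    return closid * 2;      /* was mult = 2, offset = 0 */
            case CDP_NONE:
            default:
                    return closid;          /* was mult = 1, offset = 0 */
            }
    }

For closid 3, both schemes put the CODE configuration at MSR index 7 and the DATA configuration at index 6, so the merge changes no hardware behaviour.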
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -57,20 +57,23 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
         return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d)
 {
+        struct resctrl_staged_config *cfg;
+        struct rdt_resource *r = s->res;
         unsigned long bw_val;
 
-        if (d->have_new_ctrl) {
+        cfg = &d->staged_config[s->conf_type];
+        if (cfg->have_new_ctrl) {
                 rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                 return -EINVAL;
         }
 
         if (!bw_validate(data->buf, &bw_val, r))
                 return -EINVAL;
-        d->new_ctrl = bw_val;
-        d->have_new_ctrl = true;
+        cfg->new_ctrl = bw_val;
+        cfg->have_new_ctrl = true;
 
         return 0;
 }
@@ -125,13 +128,16 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
               struct rdt_domain *d)
 {
         struct rdtgroup *rdtgrp = data->rdtgrp;
+        struct resctrl_staged_config *cfg;
+        struct rdt_resource *r = s->res;
         u32 cbm_val;
 
-        if (d->have_new_ctrl) {
+        cfg = &d->staged_config[s->conf_type];
+        if (cfg->have_new_ctrl) {
                 rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                 return -EINVAL;
         }
@@ -160,12 +166,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
          * The CBM may not overlap with the CBM of another closid if
          * either is exclusive.
          */
-        if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
                 rdt_last_cmd_puts("Overlaps with exclusive group\n");
                 return -EINVAL;
         }
 
-        if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
                 if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
                     rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                         rdt_last_cmd_puts("Overlaps with other group\n");
@@ -173,8 +179,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
                 }
         }
 
-        d->new_ctrl = cbm_val;
-        d->have_new_ctrl = true;
+        cfg->new_ctrl = cbm_val;
+        cfg->have_new_ctrl = true;
 
         return 0;
 }
@@ -185,9 +191,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
  * separated by ";". The "id" is in decimal, and must match one of
  * the "id"s for this resource.
  */
-static int parse_line(char *line, struct rdt_resource *r,
+static int parse_line(char *line, struct resctrl_schema *s,
                       struct rdtgroup *rdtgrp)
 {
+        enum resctrl_conf_type t = s->conf_type;
+        struct resctrl_staged_config *cfg;
+        struct rdt_resource *r = s->res;
         struct rdt_parse_data data;
         char *dom = NULL, *id;
         struct rdt_domain *d;
@@ -213,9 +222,10 @@ next:
                 if (d->id == dom_id) {
                         data.buf = dom;
                         data.rdtgrp = rdtgrp;
-                        if (r->parse_ctrlval(&data, r, d))
+                        if (r->parse_ctrlval(&data, s, d))
                                 return -EINVAL;
                         if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                                cfg = &d->staged_config[t];
                                 /*
                                  * In pseudo-locking setup mode and just
                                  * parsed a valid CBM that should be
@@ -224,9 +234,9 @@ next:
                                  * the required initialization for single
                                  * region and return.
                                  */
-                                rdtgrp->plr->r = r;
+                                rdtgrp->plr->s = s;
                                 rdtgrp->plr->d = d;
-                                rdtgrp->plr->cbm = d->new_ctrl;
+                                rdtgrp->plr->cbm = cfg->new_ctrl;
                                 d->plr = rdtgrp->plr;
                                 return 0;
                         }
@@ -236,28 +246,72 @@ next:
         return -EINVAL;
 }
 
-int update_domains(struct rdt_resource *r, int closid)
+static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
+{
+        switch (type) {
+        default:
+        case CDP_NONE:
+                return closid;
+        case CDP_CODE:
+                return closid * 2 + 1;
+        case CDP_DATA:
+                return closid * 2;
+        }
+}
+
+static bool apply_config(struct rdt_hw_domain *hw_dom,
+                         struct resctrl_staged_config *cfg, u32 idx,
+                         cpumask_var_t cpu_mask, bool mba_sc)
+{
+        struct rdt_domain *dom = &hw_dom->d_resctrl;
+        u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
+
+        if (cfg->new_ctrl != dc[idx]) {
+                cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
+                dc[idx] = cfg->new_ctrl;
+
+                return true;
+        }
+
+        return false;
+}
+
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 {
+        struct resctrl_staged_config *cfg;
+        struct rdt_hw_domain *hw_dom;
         struct msr_param msr_param;
+        enum resctrl_conf_type t;
         cpumask_var_t cpu_mask;
         struct rdt_domain *d;
         bool mba_sc;
-        u32 *dc;
         int cpu;
+        u32 idx;
 
         if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                 return -ENOMEM;
 
-        msr_param.low = closid;
-        msr_param.high = msr_param.low + 1;
-        msr_param.res = r;
-
         mba_sc = is_mba_sc(r);
+        msr_param.res = NULL;
         list_for_each_entry(d, &r->domains, list) {
-                dc = !mba_sc ? d->ctrl_val : d->mbps_val;
-                if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
-                        cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-                        dc[closid] = d->new_ctrl;
+                hw_dom = resctrl_to_arch_dom(d);
+                for (t = 0; t < CDP_NUM_TYPES; t++) {
+                        cfg = &hw_dom->d_resctrl.staged_config[t];
+                        if (!cfg->have_new_ctrl)
+                                continue;
+
+                        idx = get_config_index(closid, t);
+                        if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+                                continue;
+
+                        if (!msr_param.res) {
+                                msr_param.low = idx;
+                                msr_param.high = msr_param.low + 1;
+                                msr_param.res = r;
+                        } else {
+                                msr_param.low = min(msr_param.low, idx);
+                                msr_param.high = max(msr_param.high, idx + 1);
+                        }
                 }
         }
 
@@ -284,11 +338,11 @@ done:
 static int rdtgroup_parse_resource(char *resname, char *tok,
                                    struct rdtgroup *rdtgrp)
 {
-        struct rdt_resource *r;
+        struct resctrl_schema *s;
 
-        for_each_alloc_enabled_rdt_resource(r) {
-                if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
-                        return parse_line(tok, r, rdtgrp);
+        list_for_each_entry(s, &resctrl_schema_all, list) {
+                if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
+                        return parse_line(tok, s, rdtgrp);
         }
         rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
         return -EINVAL;
@@ -297,6 +351,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes, loff_t off)
 {
+        struct resctrl_schema *s;
         struct rdtgroup *rdtgrp;
         struct rdt_domain *dom;
         struct rdt_resource *r;
@@ -327,9 +382,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                 goto out;
         }
 
-        for_each_alloc_enabled_rdt_resource(r) {
-                list_for_each_entry(dom, &r->domains, list)
-                        dom->have_new_ctrl = false;
+        list_for_each_entry(s, &resctrl_schema_all, list) {
+                list_for_each_entry(dom, &s->res->domains, list)
+                        memset(dom->staged_config, 0, sizeof(dom->staged_config));
         }
 
         while ((tok = strsep(&buf, "\n")) != NULL) {
@@ -349,8 +404,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                 goto out;
         }
 
-        for_each_alloc_enabled_rdt_resource(r) {
-                ret = update_domains(r, rdtgrp->closid);
+        list_for_each_entry(s, &resctrl_schema_all, list) {
+                r = s->res;
+                ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                 if (ret)
                         goto out;
         }
@@ -371,19 +427,31 @@ out:
         return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                            u32 closid, enum resctrl_conf_type type)
+{
+        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+        u32 idx = get_config_index(closid, type);
+
+        if (!is_mba_sc(r))
+                return hw_dom->ctrl_val[idx];
+        return hw_dom->mbps_val[idx];
+}
+
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 {
+        struct rdt_resource *r = schema->res;
         struct rdt_domain *dom;
         bool sep = false;
         u32 ctrl_val;
 
-        seq_printf(s, "%*s:", max_name_width, r->name);
+        seq_printf(s, "%*s:", max_name_width, schema->name);
         list_for_each_entry(dom, &r->domains, list) {
                 if (sep)
                         seq_puts(s, ";");
 
-                ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
-                            dom->mbps_val[closid]);
+                ctrl_val = resctrl_arch_get_config(r, dom, closid,
                                                   schema->conf_type);
                 seq_printf(s, r->format_str, dom->id, max_data_width,
                            ctrl_val);
                 sep = true;
@@ -394,16 +462,17 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                            struct seq_file *s, void *v)
 {
+        struct resctrl_schema *schema;
         struct rdtgroup *rdtgrp;
-        struct rdt_resource *r;
         int ret = 0;
         u32 closid;
 
         rdtgrp = rdtgroup_kn_lock_live(of->kn);
         if (rdtgrp) {
                 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-                        for_each_alloc_enabled_rdt_resource(r)
-                                seq_printf(s, "%s:uninitialized\n", r->name);
+                        list_for_each_entry(schema, &resctrl_schema_all, list) {
+                                seq_printf(s, "%s:uninitialized\n", schema->name);
+                        }
                 } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                         if (!rdtgrp->plr->d) {
                                 rdt_last_cmd_clear();
@@ -411,15 +480,15 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                                 ret = -ENODEV;
                         } else {
                                 seq_printf(s, "%s:%d=%x\n",
-                                           rdtgrp->plr->r->name,
+                                           rdtgrp->plr->s->res->name,
                                            rdtgrp->plr->d->id,
                                            rdtgrp->plr->cbm);
                         }
                 } else {
                         closid = rdtgrp->closid;
-                        for_each_alloc_enabled_rdt_resource(r) {
-                                if (closid < r->num_closid)
-                                        show_doms(s, r, closid);
+                        list_for_each_entry(schema, &resctrl_schema_all, list) {
+                                if (closid < schema->num_closid)
+                                        show_doms(s, schema, closid);
                         }
                 }
         } else {
@@ -449,6 +518,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
         struct kernfs_open_file *of = m->private;
+        struct rdt_hw_resource *hw_res;
         u32 resid, evtid, domid;
         struct rdtgroup *rdtgrp;
         struct rdt_resource *r;
@@ -468,7 +538,8 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
         domid = md.u.domid;
         evtid = md.u.evtid;
 
-        r = &rdt_resources_all[resid];
+        hw_res = &rdt_resources_all[resid];
+        r = &hw_res->r_resctrl;
         d = rdt_find_domain(r, domid, NULL);
         if (IS_ERR_OR_NULL(d)) {
                 ret = -ENOENT;
@@ -482,7 +553,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
         else if (rr.val & RMID_VAL_UNAVAIL)
                 seq_puts(m, "Unavailable\n");
         else
-                seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+                seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
 
 out:
         rdtgroup_kn_unlock(of->kn);
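resctrl_arch_update_domains() above no longer writes a fixed [closid, closid + 1) MSR range; it grows msr_param.low/high to cover every index that actually changed, so one cross-CPU call can update the CODE and DATA MSRs of a CDP pair in a single pass. A distilled sketch of that range merging (the struct and helper names here are hypothetical, not from the kernel):

    struct range { unsigned int low, high; int valid; };

    static void range_extend(struct range *m, unsigned int idx)
    {
            if (!m->valid) {
                    m->low = idx;
                    m->high = idx + 1;
                    m->valid = 1;
            } else {
                    if (idx < m->low)
                            m->low = idx;
                    if (idx + 1 > m->high)
                            m->high = idx + 1;
            }
    }

Feeding it the CDP indexes 6 and 7 (closid 3, DATA then CODE) yields the single range [6, 8), which is exactly what the wrmsrl() loop in cat_wrmsr() then walks.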
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_INTERNAL_H
 #define _ASM_X86_RESCTRL_INTERNAL_H
 
+#include <linux/resctrl.h>
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/fs_context.h>
@@ -109,6 +110,7 @@ extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
+extern struct list_head resctrl_schema_all;
 
 enum rdt_group_type {
         RDTCTRL_GROUP = 0,
@@ -161,8 +163,8 @@ struct mongroup {
 
 /**
  * struct pseudo_lock_region - pseudo-lock region information
- * @r:                  RDT resource to which this pseudo-locked region
- *                      belongs
+ * @s:                  Resctrl schema for the resource to which this
+ *                      pseudo-locked region belongs
  * @d:                  RDT domain to which this pseudo-locked region
  *                      belongs
  * @cbm:                bitmask of the pseudo-locked region
@@ -182,7 +184,7 @@ struct mongroup {
  * @pm_reqs:            Power management QoS requests related to this region
  */
 struct pseudo_lock_region {
-        struct rdt_resource     *r;
+        struct resctrl_schema   *s;
         struct rdt_domain       *d;
         u32                     cbm;
         wait_queue_head_t       lock_thread_wq;
@@ -303,44 +305,25 @@ struct mbm_state {
 };
 
 /**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list:       all instances of this resource
- * @id:         unique id for this instance
- * @cpu_mask:   which cpus share this resource
- * @rmid_busy_llc:
- *              bitmap of which limbo RMIDs are above threshold
- * @mbm_total:  saved state for MBM total bandwidth
- * @mbm_local:  saved state for MBM local bandwidth
- * @mbm_over:   worker to periodically read MBM h/w counters
- * @cqm_limbo:  worker to periodically read CQM h/w counters
- * @mbm_work_cpu:
- *              worker cpu for MBM h/w counters
- * @cqm_work_cpu:
- *              worker cpu for CQM h/w counters
+ * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
+ *                        a resource
+ * @d_resctrl:  Properties exposed to the resctrl file system
 * @ctrl_val:   array of cache or mem ctrl values (indexed by CLOSID)
 * @mbps_val:   When mba_sc is enabled, this holds the bandwidth in MBps
- * @new_ctrl:   new ctrl value to be loaded
- * @have_new_ctrl: did user provide new_ctrl for this domain
- * @plr:        pseudo-locked region (if any) associated with domain
+ *
+ * Members of this structure are accessed via helpers that provide abstraction.
  */
-struct rdt_domain {
-        struct list_head                list;
-        int                             id;
-        struct cpumask                  cpu_mask;
-        unsigned long                   *rmid_busy_llc;
-        struct mbm_state                *mbm_total;
-        struct mbm_state                *mbm_local;
-        struct delayed_work             mbm_over;
-        struct delayed_work             cqm_limbo;
-        int                             mbm_work_cpu;
-        int                             cqm_work_cpu;
+struct rdt_hw_domain {
+        struct rdt_domain               d_resctrl;
         u32                             *ctrl_val;
         u32                             *mbps_val;
-        u32                             new_ctrl;
-        bool                            have_new_ctrl;
-        struct pseudo_lock_region       *plr;
 };
 
+static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
+{
+        return container_of(r, struct rdt_hw_domain, d_resctrl);
+}
+
 /**
  * struct msr_param - set a range of MSRs from a domain
  * @res:        The resource to use
@@ -349,69 +332,8 @@ struct rdt_domain {
  */
 struct msr_param {
         struct rdt_resource     *res;
-        int                     low;
-        int                     high;
+        u32                     low;
+        u32                     high;
 };
 
-/**
- * struct rdt_cache - Cache allocation related data
- * @cbm_len:            Length of the cache bit mask
- * @min_cbm_bits:       Minimum number of consecutive bits to be set
- * @cbm_idx_mult:       Multiplier of CBM index
- * @cbm_idx_offset:     Offset of CBM index. CBM index is computed by:
- *                      closid * cbm_idx_multi + cbm_idx_offset
- *                      in a cache bit mask
- * @shareable_bits:     Bitmask of shareable resource with other
- *                      executing entities
- * @arch_has_sparse_bitmaps:    True if a bitmap like f00f is valid.
- * @arch_has_empty_bitmaps:     True if the '0' bitmap is valid.
- * @arch_has_per_cpu_cfg:       True if QOS_CFG register for this cache
- *                              level has CPU scope.
- */
-struct rdt_cache {
-        unsigned int    cbm_len;
-        unsigned int    min_cbm_bits;
-        unsigned int    cbm_idx_mult;
-        unsigned int    cbm_idx_offset;
-        unsigned int    shareable_bits;
-        bool            arch_has_sparse_bitmaps;
-        bool            arch_has_empty_bitmaps;
-        bool            arch_has_per_cpu_cfg;
-};
-
-/**
- * enum membw_throttle_mode - System's memory bandwidth throttling mode
- * @THREAD_THROTTLE_UNDEFINED:  Not relevant to the system
- * @THREAD_THROTTLE_MAX:        Memory bandwidth is throttled at the core
- *                              always using smallest bandwidth percentage
- *                              assigned to threads, aka "max throttling"
- * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread
- */
-enum membw_throttle_mode {
-        THREAD_THROTTLE_UNDEFINED = 0,
-        THREAD_THROTTLE_MAX,
-        THREAD_THROTTLE_PER_THREAD,
-};
-
-/**
- * struct rdt_membw - Memory bandwidth allocation related data
- * @min_bw:             Minimum memory bandwidth percentage user can request
- * @bw_gran:            Granularity at which the memory bandwidth is allocated
- * @delay_linear:       True if memory B/W delay is in linear scale
- * @arch_needs_linear:  True if we can't configure non-linear resources
- * @throttle_mode:      Bandwidth throttling mode when threads request
- *                      different memory bandwidths
- * @mba_sc:             True if MBA software controller(mba_sc) is enabled
- * @mb_map:             Mapping of memory B/W percentage to memory B/W delay
- */
-struct rdt_membw {
-        u32             min_bw;
-        u32             bw_gran;
-        u32             delay_linear;
-        bool            arch_needs_linear;
-        enum membw_throttle_mode throttle_mode;
-        bool            mba_sc;
-        u32             *mb_map;
-};
-
 static inline bool is_llc_occupancy_enabled(void)
@@ -446,111 +368,103 @@ struct rdt_parse_data {
 };
 
 /**
- * struct rdt_resource - attributes of an RDT resource
- * @rid:                The index of the resource
- * @alloc_enabled:      Is allocation enabled on this machine
- * @mon_enabled:        Is monitoring enabled for this feature
- * @alloc_capable:      Is allocation available on this machine
- * @mon_capable:        Is monitor feature available on this machine
- * @name:               Name to use in "schemata" file
- * @num_closid:         Number of CLOSIDs available
- * @cache_level:        Which cache level defines scope of this resource
- * @default_ctrl:       Specifies default cache cbm or memory B/W percent.
+ * struct rdt_hw_resource - arch private attributes of a resctrl resource
+ * @r_resctrl:          Attributes of the resource used directly by resctrl.
+ * @num_closid:         Maximum number of closid this hardware can support,
+ *                      regardless of CDP. This is exposed via
+ *                      resctrl_arch_get_num_closid() to avoid confusion
+ *                      with struct resctrl_schema's property of the same name,
+ *                      which has been corrected for features like CDP.
  * @msr_base:           Base MSR address for CBMs
  * @msr_update:         Function pointer to update QOS MSRs
- * @data_width:         Character width of data when displaying
- * @domains:            All domains for this resource
- * @cache:              Cache allocation related data
- * @membw:              If the component has bandwidth controls, their properties.
- * @format_str:         Per resource format string to show domain value
- * @parse_ctrlval:      Per resource function pointer to parse control values
- * @evt_list:           List of monitoring events
- * @num_rmid:           Number of RMIDs available
 * @mon_scale:          cqm counter * mon_scale = occupancy in bytes
 * @mbm_width:          Monitor width, to detect and correct for overflow.
- * @fflags:             flags to choose base and info files
+ * @cdp_enabled:        CDP state of this resource
+ *
+ * Members of this structure are either private to the architecture
+ * e.g. mbm_width, or accessed via helpers that provide abstraction. e.g.
+ * msr_update and msr_base.
  */
-struct rdt_resource {
-        int                     rid;
-        bool                    alloc_enabled;
-        bool                    mon_enabled;
-        bool                    alloc_capable;
-        bool                    mon_capable;
-        char                    *name;
-        int                     num_closid;
-        int                     cache_level;
-        u32                     default_ctrl;
+struct rdt_hw_resource {
+        struct rdt_resource     r_resctrl;
+        u32                     num_closid;
         unsigned int            msr_base;
         void (*msr_update)      (struct rdt_domain *d, struct msr_param *m,
                                  struct rdt_resource *r);
-        int                     data_width;
-        struct list_head        domains;
-        struct rdt_cache        cache;
-        struct rdt_membw        membw;
-        const char              *format_str;
-        int (*parse_ctrlval)(struct rdt_parse_data *data,
-                             struct rdt_resource *r,
-                             struct rdt_domain *d);
-        struct list_head        evt_list;
-        int                     num_rmid;
         unsigned int            mon_scale;
         unsigned int            mbm_width;
-        unsigned long           fflags;
+        bool                    cdp_enabled;
 };
 
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
+{
+        return container_of(r, struct rdt_hw_resource, r_resctrl);
+}
+
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
               struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
-extern struct rdt_resource rdt_resources_all[];
+extern struct rdt_hw_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 extern struct dentry *debugfs_resctrl;
 
-enum {
+enum resctrl_res_level {
         RDT_RESOURCE_L3,
-        RDT_RESOURCE_L3DATA,
-        RDT_RESOURCE_L3CODE,
         RDT_RESOURCE_L2,
-        RDT_RESOURCE_L2DATA,
-        RDT_RESOURCE_L2CODE,
         RDT_RESOURCE_MBA,
 
         /* Must be the last */
         RDT_NUM_RESOURCES,
 };
 
+static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
+{
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
+
+        hw_res++;
+        return &hw_res->r_resctrl;
+}
+
+static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
+{
+        return rdt_resources_all[l].cdp_enabled;
+}
+
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/*
+ * To return the common struct rdt_resource, which is contained in struct
+ * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
+ */
 #define for_each_rdt_resource(r)                                              \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)
+        for (r = &rdt_resources_all[0].r_resctrl;                             \
+             r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl;        \
+             r = resctrl_inc(r))
 
 #define for_each_capable_rdt_resource(r)                                      \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)                                                             \
+        for_each_rdt_resource(r)                                              \
                 if (r->alloc_capable || r->mon_capable)
 
 #define for_each_alloc_capable_rdt_resource(r)                                \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)                                                             \
+        for_each_rdt_resource(r)                                              \
                 if (r->alloc_capable)
 
 #define for_each_mon_capable_rdt_resource(r)                                  \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)                                                             \
+        for_each_rdt_resource(r)                                              \
                 if (r->mon_capable)
 
 #define for_each_alloc_enabled_rdt_resource(r)                                \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)                                                             \
+        for_each_rdt_resource(r)                                              \
                 if (r->alloc_enabled)
 
 #define for_each_mon_enabled_rdt_resource(r)                                  \
-        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-             r++)                                                             \
+        for_each_rdt_resource(r)                                              \
                 if (r->mon_enabled)
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
@@ -594,7 +508,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                            struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                            unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
                                   unsigned long cbm);
@@ -609,7 +523,6 @@ void rdt_pseudo_lock_release(void);
 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
-int update_domains(struct rdt_resource *r, int closid);
 int closids_supported(void);
 void closid_free(int closid);
 int alloc_rmid(void);
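The rewritten for_each_rdt_resource() above deserves a second look: the array elements are now struct rdt_hw_resource, so stepping a struct rdt_resource pointer by one would advance by the wrong stride; resctrl_inc() converts to the wrapper, steps it, then converts back. A toy user-space demonstration of that stride problem (the struct layouts here are stand-ins, not the kernel definitions):

    #include <stdio.h>

    struct rdt_resource { int rid; };
    struct rdt_hw_resource {
            struct rdt_resource r_resctrl;
            unsigned int msr_base;          /* arch state the fs must not see */
    };

    static struct rdt_hw_resource all[3];

    int main(void)
    {
            struct rdt_resource *r = &all[0].r_resctrl;

            /* Wrong: steps by sizeof(struct rdt_resource), lands mid-element. */
            printf("naive r + 1   = %p\n", (void *)(r + 1));

            /* Right, as resctrl_inc() does: step the wrapper, convert back.
             * The cast is valid because r_resctrl is the first member. */
            struct rdt_hw_resource *hw = (struct rdt_hw_resource *)r;
            printf("resctrl_inc() = %p (== &all[1].r_resctrl = %p)\n",
                   (void *)&(hw + 1)->r_resctrl, (void *)&all[1].r_resctrl);
            return 0;
    }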
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -174,7 +174,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
         struct rdt_resource *r;
         u32 crmid = 1, nrmid;
 
-        r = &rdt_resources_all[RDT_RESOURCE_L3];
+        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
         /*
          * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
@@ -232,7 +232,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
         int cpu;
         u64 val;
 
-        r = &rdt_resources_all[RDT_RESOURCE_L3];
+        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
         entry->busy = 0;
         cpu = get_cpu();
@@ -287,6 +287,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 
 static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
         struct mbm_state *m;
         u64 chunks, tval;
 
@@ -318,7 +319,7 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
                 return 0;
         }
 
-        chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
+        chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
         m->chunks += chunks;
         m->prev_msr = tval;
 
@@ -333,7 +334,7 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
  */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
         struct mbm_state *m = &rr->d->mbm_local[rmid];
         u64 tval, cur_bw, chunks;
 
@@ -341,8 +342,8 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
         if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                 return;
 
-        chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
-        cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
+        chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width);
+        cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20;
 
         if (m->delta_comp)
                 m->delta_bw = abs(cur_bw - m->prev_bw);
@@ -421,6 +422,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
         u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
         struct mbm_state *pmbm_data, *cmbm_data;
+        struct rdt_hw_resource *hw_r_mba;
+        struct rdt_hw_domain *hw_dom_mba;
         u32 cur_bw, delta_bw, user_bw;
         struct rdt_resource *r_mba;
         struct rdt_domain *dom_mba;
@@ -430,7 +433,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
         if (!is_mbm_local_enabled())
                 return;
 
-        r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+        hw_r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+        r_mba = &hw_r_mba->r_resctrl;
         closid = rgrp->closid;
         rmid = rgrp->mon.rmid;
         pmbm_data = &dom_mbm->mbm_local[rmid];
@@ -440,11 +444,16 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                 pr_warn_once("Failure to get domain for MBA update\n");
                 return;
         }
+        hw_dom_mba = resctrl_to_arch_dom(dom_mba);
 
         cur_bw = pmbm_data->prev_bw;
-        user_bw = dom_mba->mbps_val[closid];
+        user_bw = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
         delta_bw = pmbm_data->delta_bw;
-        cur_msr_val = dom_mba->ctrl_val[closid];
+        /*
+         * resctrl_arch_get_config() chooses the mbps/ctrl value to return
+         * based on is_mba_sc(). For now, reach into the hw_dom.
+         */
+        cur_msr_val = hw_dom_mba->ctrl_val[closid];
 
         /*
          * For Ctrl groups read data from child monitor groups.
@@ -479,9 +488,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                 return;
         }
 
-        cur_msr = r_mba->msr_base + closid;
+        cur_msr = hw_r_mba->msr_base + closid;
         wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
-        dom_mba->ctrl_val[closid] = new_msr_val;
+        hw_dom_mba->ctrl_val[closid] = new_msr_val;
 
         /*
          * Delta values are updated dynamically package wise for each
@@ -543,7 +552,7 @@ void cqm_handle_limbo(struct work_struct *work)
 
         mutex_lock(&rdtgroup_mutex);
 
-        r = &rdt_resources_all[RDT_RESOURCE_L3];
+        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
         d = container_of(work, struct rdt_domain, cqm_limbo.work);
 
         __check_limbo(d, false);
@@ -579,7 +588,7 @@ void mbm_handle_overflow(struct work_struct *work)
         if (!static_branch_likely(&rdt_mon_enable_key))
                 goto out_unlock;
 
-        r = &rdt_resources_all[RDT_RESOURCE_L3];
+        r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
         d = container_of(work, struct rdt_domain, mbm_over.work);
 
         list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -676,15 +685,16 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
         unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
+        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
         unsigned int cl_size = boot_cpu_data.x86_cache_size;
         int ret;
 
-        r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
+        hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
         r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
-        r->mbm_width = MBM_CNTR_WIDTH_BASE;
+        hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
 
         if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
-                r->mbm_width += mbm_offset;
+                hw_res->mbm_width += mbm_offset;
         else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
                 pr_warn("Ignoring impossible MBM counter offset\n");
 
@@ -698,7 +708,7 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
         resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
         /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-        resctrl_cqm_threshold /= r->mon_scale;
+        resctrl_cqm_threshold /= hw_res->mon_scale;
 
         ret = dom_data_init(r);
         if (ret)
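The monitor.c hunks above only move mon_scale and mbm_width into the arch-private struct; the arithmetic itself is unchanged. A worked example of that scaling (the numbers are hypothetical, the formulas are the ones in the hunks above):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long rmid_val = 512;  /* rr.val read from the counter */
            unsigned int mon_scale = 61440;     /* boot_cpu_data.x86_cache_occ_scale */
            unsigned long long chunks = 2048;   /* corrected MBM chunk delta */

            /* Occupancy, as printed by rdtgroup_mondata_show(). */
            printf("%llu bytes\n", rmid_val * mon_scale);    /* 31457280 (30 MiB) */

            /* Bandwidth, as in mbm_bw_count(): bytes >> 20 gives MB. */
            printf("%llu MB\n", (chunks * mon_scale) >> 20); /* 120 */
            return 0;
    }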
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -250,7 +250,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
         plr->line_size = 0;
         kfree(plr->kmem);
         plr->kmem = NULL;
-        plr->r = NULL;
+        plr->s = NULL;
         if (plr->d)
                 plr->d->plr = NULL;
         plr->d = NULL;
@@ -294,10 +294,10 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 
         ci = get_cpu_cacheinfo(plr->cpu);
 
-        plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+        plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
 
         for (i = 0; i < ci->num_leaves; i++) {
-                if (ci->info_list[i].level == plr->r->cache_level) {
+                if (ci->info_list[i].level == plr->s->res->cache_level) {
                         plr->line_size = ci->info_list[i].coherency_line_size;
                         return 0;
                 }
@@ -688,8 +688,8 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
          * resource, the portion of cache used by it should be made
          * unavailable to all future allocations from both resources.
          */
-        if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
-            rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+        if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+            resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
                 rdt_last_cmd_puts("CDP enabled\n");
                 return -EINVAL;
         }
@@ -800,7 +800,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm
         unsigned long cbm_b;
 
         if (d->plr) {
-                cbm_len = d->plr->r->cache.cbm_len;
+                cbm_len = d->plr->s->res->cache.cbm_len;
                 cbm_b = d->plr->cbm;
                 if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                         return true;
|
|||
struct rdtgroup rdtgroup_default;
|
||||
LIST_HEAD(rdt_all_groups);
|
||||
|
||||
/* list of entries for the schemata file */
|
||||
LIST_HEAD(resctrl_schema_all);
|
||||
|
||||
/* Kernel fs node for "info" directory under root */
|
||||
static struct kernfs_node *kn_info;
|
||||
|
||||
|
@@ -100,12 +103,12 @@ int closids_supported(void)

static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;
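closid_init() now walks the schema list, so the effective CLOSID count is the minimum over every schema (and, as later hunks show, CDP halves the per-schema count). A standalone sketch of the free-map computation with invented counts — the kernel additionally reserves CLOSID 0 for the default group, which is not shown here:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* invented per-schema num_closid values, e.g. L3CODE, L3DATA, MB */
		uint32_t num_closid[] = { 8, 8, 16 };
		uint32_t rdt_min_closid = 32;
		unsigned int i;

		for (i = 0; i < sizeof(num_closid) / sizeof(num_closid[0]); i++)
			if (num_closid[i] < rdt_min_closid)
				rdt_min_closid = num_closid[i];

		/* one free bit per usable closid */
		uint64_t closid_free_map = ((uint64_t)1 << rdt_min_closid) - 1;
		printf("min=%u map=%#llx\n", rdt_min_closid,
		       (unsigned long long)closid_free_map);
		return 0;
	}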
@@ -842,16 +845,17 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
@@ -860,7 +864,8 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
@@ -869,7 +874,8 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
@@ -892,38 +898,40 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
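With the ctrl_val array now private to the architecture, rdt_bit_usage_show() fetches each CLOSID's mask through resctrl_arch_get_config() instead of walking the array pointer. The classification itself is just OR-accumulation per mode; a toy model with made-up masks:

	#include <stdio.h>

	enum mode { MODE_SHAREABLE, MODE_EXCLUSIVE };

	int main(void)
	{
		/* made-up per-closid CBMs and modes for one cache domain */
		unsigned long cbm[] = { 0x0f, 0x30, 0xc0 };
		enum mode mode[]    = { MODE_SHAREABLE, MODE_EXCLUSIVE, MODE_SHAREABLE };
		unsigned long sw_shareable = 0, exclusive = 0;
		unsigned int i;

		for (i = 0; i < 3; i++) {
			if (mode[i] == MODE_SHAREABLE)
				sw_shareable |= cbm[i];	/* bit may be shared */
			else
				exclusive |= cbm[i];	/* bit owned by one group */
		}
		printf("sw_shareable=%#lx exclusive=%#lx\n", sw_shareable, exclusive);
		return 0;
	}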
@@ -970,7 +978,8 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
@@ -1001,7 +1010,8 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
@@ -1010,7 +1020,8 @@ static int rdt_bw_gran_show(struct kernfs_open_file *of,
static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
@@ -1020,8 +1031,9 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
	seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);

	return 0;
}
@@ -1029,7 +1041,8 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
@@ -1042,7 +1055,7 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct rdt_hw_resource *hw_res;
	unsigned int bytes;
	int ret;

@@ -1053,7 +1066,8 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;
	hw_res = resctrl_to_arch_res(of->kn->parent->priv);
	resctrl_cqm_threshold = bytes / hw_res->mon_scale;

	return nbytes;
}
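max_threshold_occ_write() keeps accepting a byte count from user space but now divides by the mon_scale stashed in the hardware-private struct. The same conversion modelled in plain C, with example numbers standing in for the CPUID-derived values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cache_size_kb = 24576;	/* example x86_cache_size */
		unsigned int mon_scale = 65536;		/* example occupancy scale */
		unsigned int bytes = 1048576;		/* user-supplied limit */

		if (bytes > cache_size_kb * 1024)
			return 1;			/* kernel returns -EINVAL */

		/* stored in hardware counter units, rounding down */
		printf("resctrl_cqm_threshold=%u\n", bytes / mon_scale);
		return 0;
	}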
@@ -1078,76 +1092,17 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
	return 0;
}

/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *         Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *         Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *         and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
		ret = -ENOENT;
		goto out;
	case CDP_NONE:
		return CDP_NONE;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		_d_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}

/**
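Once the CODE/DATA variants stop being separate resources, the peer lookup collapses from the table-driven rdt_cdp_peer_get() above into a pure function on the configuration type — no domain search, no WARN path, since both halves live in the same rdt_domain. A self-contained copy of that mapping:

	#include <assert.h>

	enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

	static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
	{
		switch (my_type) {
		case CDP_CODE: return CDP_DATA;
		case CDP_DATA: return CDP_CODE;
		case CDP_NONE: return CDP_NONE;
		}
		return CDP_NONE;
	}

	int main(void)
	{
		/* a CODE schema's peer is the DATA half of the same cache, and vice versa */
		assert(resctrl_peer_type(CDP_CODE) == CDP_DATA);
		assert(resctrl_peer_type(CDP_DATA) == CDP_CODE);
		assert(resctrl_peer_type(CDP_NONE) == CDP_NONE);
		return 0;
	}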
@@ -1171,11 +1126,11 @@ out:
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */

@@ -1186,9 +1141,8 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {

@@ -1208,7 +1162,7 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.

@@ -1226,19 +1180,19 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

/**
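rdtgroup_cbm_overlaps() now tests the schema's own configuration type and, when CDP is enabled, repeats the test against the peer type in the same domain. The underlying test is plain bitmap intersection; a sketch with invented masks:

	#include <stdio.h>

	/* true if two capacity bitmasks share any portion of the cache */
	static int cbm_overlaps(unsigned long a, unsigned long b, unsigned int cbm_len)
	{
		unsigned long mask = (1UL << cbm_len) - 1;

		return (a & b & mask) != 0;
	}

	int main(void)
	{
		printf("%d\n", cbm_overlaps(0x0f, 0x30, 8));	/* disjoint -> 0 */
		printf("%d\n", cbm_overlaps(0x0f, 0x18, 8));	/* share bit 3 -> 1 */
		return 0;
	}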
@@ -1256,17 +1210,21 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	for_each_alloc_enabled_rdt_resource(r) {
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
@@ -1397,6 +1355,7 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
@@ -1418,8 +1377,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
@@ -1427,18 +1386,19 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				ctrl = resctrl_arch_get_config(r, d,
							       rdtgrp->closid,
							       schema->conf_type);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
@@ -1757,14 +1717,14 @@ int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
	return ret;
}

static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);
@@ -1781,6 +1741,7 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];

@@ -1795,9 +1756,11 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
	/* loop over enabled controls, these are all alloc_enabled */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}
@@ -1867,7 +1830,7 @@ static void l2_qos_cfg_update(void *arg)

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)

@@ -1888,7 +1851,7 @@ static int set_cache_qos_cfg(int level, bool enable)
	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level];
	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
@@ -1914,14 +1877,16 @@ static int set_cache_qos_cfg(int level, bool enable)
/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	if (!r->alloc_capable)
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
		l2_qos_cfg_update(&r->alloc_enabled);
	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
		l3_qos_cfg_update(&r->alloc_enabled);
	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

/*
@@ -1932,7 +1897,8 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||

@@ -1940,73 +1906,60 @@ static int set_mba_sc(bool mba_sc)
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
	}

	return 0;
}

static int cdp_enable(int level, int data_type, int code_type)
static int cdp_enable(int level)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static int cdpl3_enable(void)
static void cdp_disable(int level)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

static void cdpl3_disable(void)
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
}

/*
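The four cdpl{2,3}_{en,dis}able() wrappers collapse into one arch interface keyed by resource level, with a single cdp_enabled flag replacing the three alloc_enabled flips. A userspace model of that control flow — struct layout and the QOS_CFG MSR write are elided, and the example capability values are invented:

	#include <stdbool.h>
	#include <stdio.h>

	enum resctrl_res_level { RDT_RESOURCE_L3, RDT_RESOURCE_L2, RDT_NUM_RESOURCES };

	struct hw_resource {
		bool cdp_capable;
		bool cdp_enabled;
	};

	static struct hw_resource res_all[RDT_NUM_RESOURCES] = {
		[RDT_RESOURCE_L3] = { .cdp_capable = true },	/* example: only L3 has CDP */
	};

	static int set_cdp_enabled(enum resctrl_res_level l, bool enable)
	{
		if (!res_all[l].cdp_capable)
			return -1;		/* kernel: -EINVAL */
		/* kernel: also updates the L2/L3 QOS_CFG register on every domain */
		res_all[l].cdp_enabled = enable;
		return 0;
	}

	int main(void)
	{
		printf("L3: %d\n", set_cdp_enabled(RDT_RESOURCE_L3, true));	/* 0 */
		printf("L2: %d\n", set_cdp_enabled(RDT_RESOURCE_L2, true));	/* -1 */
		return 0;
	}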
@@ -2084,10 +2037,10 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = cdpl2_enable();
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);

	if (!ret && ctx->enable_cdpl3)
		ret = cdpl3_enable();
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

@@ -2095,6 +2048,92 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
	return ret;
}

static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}

static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_enabled_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}

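schemata_list_add() derives the user-visible schema name from the resource name plus a CODE/DATA suffix, rejecting anything that would not fit the 8-byte name field. The same check in isolation, with the name sizes taken from the patch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char name[8];			/* matches resctrl_schema::name */
		const char *rname = "L3";	/* resource name */
		const char *suffix = "CODE";	/* CDP_CODE half */

		int ret = snprintf(name, sizeof(name), "%s%s", rname, suffix);
		if (ret >= (int)sizeof(name))
			return 1;	/* would be truncated: kernel returns -EINVAL */
		printf("schema name: %s (len %zu)\n", name, strlen(name));
		return 0;
	}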
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

@@ -2116,11 +2155,17 @@ static int rdt_get_tree(struct fs_context *fc)
	if (ret < 0)
		goto out_cdp;

	ret = schemata_list_create();
	if (ret) {
		schemata_list_destroy();
		goto out_mba;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_mba;
		goto out_schemata_free;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
@@ -2153,7 +2198,7 @@ static int rdt_get_tree(struct fs_context *fc)
	static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}
@@ -2170,6 +2215,8 @@ out_mongrp:
	kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_schemata_free:
	schemata_list_destroy();
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
@@ -2257,6 +2304,8 @@ static int rdt_init_fs_context(struct fs_context *fc)

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;

@@ -2267,7 +2316,7 @@ static int reset_all_ctrls(struct rdt_resource *r)

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
@@ -2275,10 +2324,11 @@ static int reset_all_ctrls(struct rdt_resource *r)
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
@@ -2408,6 +2458,7 @@ static void rdt_kill_sb(struct super_block *sb)
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	schemata_list_destroy();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -2642,23 +2693,24 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
				 u32 closid)
{
	struct rdt_resource *r_cdp = NULL;
	struct rdt_domain *d_cdp = NULL;
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, *ctrl;
	u32 peer_ctl, ctrl_val;
	int i;

	rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
	d->have_new_ctrl = false;
	d->new_ctrl = r->cache.shareable_bits;
	cfg = &d->staged_config[t];
	cfg->have_new_ctrl = false;
	cfg->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
	for (i = 0; i < closids_supported(); i++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)

@@ -2673,35 +2725,38 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (d_cdp)
				peer_ctl = d_cdp->ctrl_val[i];
			if (resctrl_arch_get_cdp_enabled(r->rid))
				peer_ctl = resctrl_arch_get_config(r, d, i,
								   peer_type);
			else
				peer_ctl = 0;
			used_b |= *ctrl | peer_ctl;
			ctrl_val = resctrl_arch_get_config(r, d, i,
							   s->conf_type);
			used_b |= ctrl_val | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				d->new_ctrl |= *ctrl | peer_ctl;
				cfg->new_ctrl |= ctrl_val | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	d->new_ctrl |= unused_b;
	cfg->new_ctrl |= unused_b;
	/*
	 * Force the initial CBM to be valid, user can
	 * modify the CBM based on system availability.
	 */
	d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
	cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = d->new_ctrl;
	tmp_cbm = cfg->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
		rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
		return -ENOSPC;
	}
	d->have_new_ctrl = true;
	cfg->have_new_ctrl = true;

	return 0;
}
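__init_one_rdt_domain() now accumulates into the staged config rather than the domain itself, but the bit arithmetic is unchanged: start from the hardware-shareable bits and add everything no other group owns. A worked example with invented masks — the kernel additionally forces the result to be a valid (contiguous, where required) bitmap via cbm_ensure_valid(), which is omitted here:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cbm_len = 8;
		unsigned long shareable_bits = 0x03;	/* used by hardware/firmware */
		unsigned long other_groups = 0x30;	/* owned by other closids */

		unsigned long used_b = shareable_bits | other_groups;
		unsigned long unused_b = ~used_b & ((1UL << cbm_len) - 1);

		/* the new group gets the shareable bits plus everything unused */
		unsigned long new_ctrl = shareable_bits | unused_b;
		printf("used=%#lx unused=%#lx new_ctrl=%#lx\n",
		       used_b, unused_b, new_ctrl);
		return 0;
	}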
@@ -2716,13 +2771,13 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &r->domains, list) {
		ret = __init_one_rdt_domain(d, r, closid);
	list_for_each_entry(d, &s->res->domains, list) {
		ret = __init_one_rdt_domain(d, s, closid);
		if (ret < 0)
			return ret;
	}
@@ -2733,30 +2788,34 @@ static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
	struct resctrl_staged_config *cfg;
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
		d->have_new_ctrl = true;
		cfg = &d->staged_config[CDP_NONE];
		cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
		cfg->have_new_ctrl = true;
	}
}

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	int ret;

	for_each_alloc_enabled_rdt_resource(r) {
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA) {
			rdtgroup_init_mba(r);
		} else {
			ret = rdtgroup_init_cat(r, rdtgrp->closid);
			ret = rdtgroup_init_cat(s, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = update_domains(r, rdtgrp->closid);
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
@@ -3124,13 +3183,13 @@ out:

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		seq_puts(seq, ",cdp");

	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
		seq_puts(seq, ",mba_MBps");

	return 0;
@@ -2,6 +2,8 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>

#ifdef CONFIG_PROC_CPU_RESCTRL

@@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m,

#endif

/**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
 * @CDP_CODE:	Configuration applies to instruction fetches.
 * @CDP_DATA:	Configuration applies to reads and writes.
 */
enum resctrl_conf_type {
	CDP_NONE,
	CDP_CODE,
	CDP_DATA,
};

#define CDP_NUM_TYPES	(CDP_DATA + 1)

/**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	whether the user provided new_ctrl is valid
 */
struct resctrl_staged_config {
	u32			new_ctrl;
	bool			have_new_ctrl;
};

/**
 * struct rdt_domain - group of CPUs sharing a resctrl resource
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @cpu_mask:		which CPUs share this resource
 * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
 * @mbm_total:		saved state for MBM total bandwidth
 * @mbm_local:		saved state for MBM local bandwidth
 * @mbm_over:		worker to periodically read MBM h/w counters
 * @cqm_limbo:		worker to periodically read CQM h/w counters
 * @mbm_work_cpu:	worker CPU for MBM h/w counters
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied
 */
struct rdt_domain {
	struct list_head		list;
	int				id;
	struct cpumask			cpu_mask;
	unsigned long			*rmid_busy_llc;
	struct mbm_state		*mbm_total;
	struct mbm_state		*mbm_local;
	struct delayed_work		mbm_over;
	struct delayed_work		cqm_limbo;
	int				mbm_work_cpu;
	int				cqm_work_cpu;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
};

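One rdt_domain now carries a staged_config slot per configuration type, so a single schemata write can stage CODE and DATA values independently before they are applied together by resctrl_arch_update_domains(). A compact userspace model of that flow:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };
	#define CDP_NUM_TYPES (CDP_DATA + 1)

	struct resctrl_staged_config {
		uint32_t new_ctrl;
		bool have_new_ctrl;
	};

	int main(void)
	{
		struct resctrl_staged_config staged[CDP_NUM_TYPES] = { { 0 } };
		int t;

		/* stage both halves of a CDP resource, then apply in one pass */
		staged[CDP_CODE] = (struct resctrl_staged_config){ 0x0f, true };
		staged[CDP_DATA] = (struct resctrl_staged_config){ 0xf0, true };

		for (t = 0; t < CDP_NUM_TYPES; t++)
			if (staged[t].have_new_ctrl)
				printf("type %d -> %#x\n", t, staged[t].new_ctrl);
		return 0;
	}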
/**
 * struct resctrl_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set
 * @shareable_bits:	Bitmask of shareable resource with other
 *			executing entities
 * @arch_has_sparse_bitmaps:	True if a bitmap like f00f is valid.
 * @arch_has_empty_bitmaps:	True if the '0' bitmap is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 */
struct resctrl_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmaps;
	bool		arch_has_empty_bitmaps;
	bool		arch_has_per_cpu_cfg;
};

/**
 * enum membw_throttle_mode - System's memory bandwidth throttling mode
 * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
 * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core
 *				always using smallest bandwidth percentage
 *				assigned to threads, aka "max throttling"
 * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
 */
enum membw_throttle_mode {
	THREAD_THROTTLE_UNDEFINED = 0,
	THREAD_THROTTLE_MAX,
	THREAD_THROTTLE_PER_THREAD,
};

/**
 * struct resctrl_membw - Memory bandwidth allocation related data
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @arch_needs_linear:	True if we can't configure non-linear resources
 * @throttle_mode:	Bandwidth throttling mode when threads request
 *			different memory bandwidths
 * @mba_sc:		True if MBA software controller(mba_sc) is enabled
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct resctrl_membw {
	u32				min_bw;
	u32				bw_gran;
	u32				delay_linear;
	bool				arch_needs_linear;
	enum membw_throttle_mode	throttle_mode;
	bool				mba_sc;
	u32				*mb_map;
};

struct rdt_parse_data;
struct resctrl_schema;

/**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
 * @alloc_enabled:	Is allocation enabled on this machine
 * @mon_enabled:	Is monitoring enabled for this feature
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @num_rmid:		Number of RMIDs available
 * @cache_level:	Which cache level defines scope of this resource
 * @cache:		Cache allocation related data
 * @membw:		If the component has bandwidth controls, their properties.
 * @domains:		All domains for this resource
 * @name:		Name to use in "schemata" file.
 * @data_width:		Character width of data when displaying
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @format_str:		Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
 * @evt_list:		List of monitoring events
 * @fflags:		flags to choose base and info files
 * @cdp_capable:	Is the CDP feature available on this resource
 */
struct rdt_resource {
	int			rid;
	bool			alloc_enabled;
	bool			mon_enabled;
	bool			alloc_capable;
	bool			mon_capable;
	int			num_rmid;
	int			cache_level;
	struct resctrl_cache	cache;
	struct resctrl_membw	membw;
	struct list_head	domains;
	char			*name;
	int			data_width;
	u32			default_ctrl;
	const char		*format_str;
	int			(*parse_ctrlval)(struct rdt_parse_data *data,
						 struct resctrl_schema *s,
						 struct rdt_domain *d);
	struct list_head	evt_list;
	unsigned long		fflags;
	bool			cdp_capable;
};

/**
 * struct resctrl_schema - configuration abilities of a resource presented to
 *			   user-space
 * @list:	Member of resctrl_schema_all.
 * @name:	The name to use in the "schemata" file.
 * @conf_type:	Whether this schema is specific to code/data.
 * @res:	The resource structure exported by the architecture to describe
 *		the hardware that is configured by this schema.
 * @num_closid:	The number of closid that can be used with this schema. When
 *		features like CDP are enabled, this will be lower than the
 *		hardware supports for the resource.
 */
struct resctrl_schema {
	struct list_head		list;
	char				name[8];
	enum resctrl_conf_type		conf_type;
	struct rdt_resource		*res;
	u32				num_closid;
};

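num_closid in the schema is deliberately the effective value: when CDP is on, each resource group consumes two hardware CLOSIDs (one CODE, one DATA), so user space sees half of what the hardware reports. In numbers, with an invented hardware count:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hw_num_closid = 16;	/* example value from the arch code */
		int cdp_enabled = 1;

		/* with CDP, each closid burns a CODE and a DATA hardware entry */
		unsigned int schema_num_closid = hw_num_closid / (cdp_enabled ? 2 : 1);
		printf("schemata expose %u closids\n", schema_num_closid);
		return 0;
	}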
/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type);

#endif /* _RESCTRL_H */
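These three declarations are the seed of the arch interface that an MPAM port would eventually implement behind the unchanged fs code. A hypothetical, heavily simplified stub of what such an implementation could look like — every body and value below is invented for illustration, not Arm code:

	#include <stddef.h>
	#include <stdint.h>

	struct rdt_resource;
	struct rdt_domain;

	/* e.g. derived from the PARTID space an MPAM-like controller advertises */
	static uint32_t resctrl_arch_get_num_closid(struct rdt_resource *r)
	{
		(void)r;
		return 16;
	}

	/* would translate each staged config into device register writes */
	static int resctrl_arch_update_domains(struct rdt_resource *r, uint32_t closid)
	{
		(void)r;
		(void)closid;
		return 0;
	}

	int main(void)
	{
		return resctrl_arch_update_domains(NULL, 0) +
		       (int)(resctrl_arch_get_num_closid(NULL) == 0);
	}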