Path: blob/master/arch/x86/kernel/cpu/amd_cache_disable.c
// SPDX-License-Identifier: GPL-2.0
/*
 * AMD L3 cache_disable_{0,1} sysfs handling
 * Documentation/ABI/testing/sysfs-devices-system-cpu
 */

#include <linux/cacheinfo.h>
#include <linux/capability.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include <asm/amd/nb.h>

#include "cpu.h"

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
        struct amd_l3_cache *l3 = &nb->l3_cache;
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

        pci_read_config_dword(nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
        l3->subcaches[1] = sc1 = !(val & BIT(4));

        if (boot_cpu_data.x86 == 0x15) {
                l3->subcaches[0] = sc0 += !(val & BIT(1));
                l3->subcaches[1] = sc1 += !(val & BIT(5));
        }

        l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
        l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
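
/*
 * Worked example (an illustrative sketch, not taken from real hardware):
 * if the 0x1C4 read above returns 0 (no subcache disabled) on a
 * non-family-0x15 part, then sc0 = sc1 = 1 and sc2 = sc3 = 2, so
 * l3->indices = (2 << 10) - 1 = 2047.
 */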

/*
 * Check whether a slot used for disabling an L3 index is occupied.
 * @nb:   northbridge holding the L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used, or a negative value if the slot
 *           is free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned int slot)
{
        unsigned int reg = 0;

        pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
                return reg & 0xfff;

        return -1;
}

static ssize_t show_cache_disable(struct cacheinfo *ci, char *buf, unsigned int slot)
{
        int index;
        struct amd_northbridge *nb = ci->priv;

        index = amd_get_l3_disable_slot(nb, slot);
        if (index >= 0)
                return sysfs_emit(buf, "%d\n", index);

        return sysfs_emit(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)                                        \
static ssize_t                                                          \
cache_disable_##slot##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        struct cacheinfo *ci = dev_get_drvdata(dev);                    \
        return show_cache_disable(ci, buf, slot);                       \
}

SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
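
/*
 * For reference, SHOW_CACHE_DISABLE(0) above expands, via ## token
 * pasting, to:
 *
 *      static ssize_t
 *      cache_disable_0_show(struct device *dev,
 *                           struct device_attribute *attr, char *buf)
 *      {
 *              struct cacheinfo *ci = dev_get_drvdata(dev);
 *              return show_cache_disable(ci, buf, 0);
 *      }
 */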

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
                                 unsigned int slot, unsigned long idx)
{
        int i;

        idx |= BIT(30);

        /*
         * disable index in all 4 subcaches
         */
        for (i = 0; i < 4; i++) {
                u32 reg = idx | (i << 20);

                if (!nb->l3_cache.subcaches[i])
                        continue;

                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node containing the
                 * L3 cache whose indices we disable; a simple wbinvd() is
                 * therefore not sufficient.
                 */
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
        }
}

/*
 * Disable an L3 cache index by using a disable-slot.
 *
 * @nb:    northbridge holding the L3 cache descriptor
 * @cpu:   a CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
                                   unsigned int slot, unsigned long index)
{
        int ret = 0;

        /* check if @slot is already used or the index is already disabled */
        ret = amd_get_l3_disable_slot(nb, slot);
        if (ret >= 0)
                return -EEXIST;

        if (index > nb->l3_cache.indices)
                return -EINVAL;

        /* check whether the other slot has disabled the same index already */
        if (index == amd_get_l3_disable_slot(nb, !slot))
                return -EEXIST;

        amd_l3_disable_index(nb, cpu, slot, index);

        return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *ci, const char *buf,
                                   size_t count, unsigned int slot)
{
        struct amd_northbridge *nb = ci->priv;
        unsigned long val = 0;
        int cpu, err = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        cpu = cpumask_first(&ci->shared_cpu_map);

        if (kstrtoul(buf, 10, &val) < 0)
                return -EINVAL;

        err = amd_set_l3_disable_slot(nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
                        pr_warn("L3 slot %d in use/index already disabled!\n",
                                slot);
                return err;
        }
        return count;
}

#define STORE_CACHE_DISABLE(slot)                                       \
static ssize_t                                                          \
cache_disable_##slot##_store(struct device *dev,                        \
                             struct device_attribute *attr,             \
                             const char *buf, size_t count)             \
{                                                                       \
        struct cacheinfo *ci = dev_get_drvdata(dev);                    \
        return store_cache_disable(ci, buf, count, slot);               \
}

STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct cacheinfo *ci = dev_get_drvdata(dev);
        int cpu = cpumask_first(&ci->shared_cpu_map);

        return sysfs_emit(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct cacheinfo *ci = dev_get_drvdata(dev);
        int cpu = cpumask_first(&ci->shared_cpu_map);
        unsigned long val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (kstrtoul(buf, 16, &val) < 0)
                return -EINVAL;

        if (amd_set_subcaches(cpu, val))
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t cache_private_attrs_is_visible(struct kobject *kobj,
                                              struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *ci = dev_get_drvdata(dev);
        umode_t mode = attr->mode;

        if (!ci->priv)
                return 0;

        if ((attr == &dev_attr_subcaches.attr) &&
            amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return mode;

        if ((attr == &dev_attr_cache_disable_0.attr ||
             attr == &dev_attr_cache_disable_1.attr) &&
            amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return mode;

        return 0;
}

static struct attribute_group cache_private_group = {
        .is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
        static struct attribute **amd_l3_attrs;
        int n = 1;

        if (amd_l3_attrs) /* already initialized */
                return;

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                n += 2;
        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                n += 1;

        amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
        if (!amd_l3_attrs)
                return;

        n = 0;
        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
                amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
                amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
        }
        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

        cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *cache_get_priv_group(struct cacheinfo *ci)
{
        struct amd_northbridge *nb = ci->priv;

        if (ci->level < 3 || !nb)
                return NULL;

        if (nb && nb->l3_cache.indices)
                init_amd_l3_attrs();

        return &cache_private_group;
}

struct amd_northbridge *amd_init_l3_cache(int index)
{
        struct amd_northbridge *nb;
        int node;

        /* only for L3, and not in virtualized environments */
        if (index < 3)
                return NULL;

        node = topology_amd_node_id(smp_processor_id());
        nb = node_to_amd_nb(node);
        if (nb && !nb->l3_cache.indices)
                amd_calc_l3_indices(nb);

        return nb;
}
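
/*
 * Example userspace interaction (a sketch; the authoritative description
 * is the ABI document referenced at the top of this file, and the index3
 * path below is an assumption about where the L3 appears):
 *
 *      # read slot 0: prints "FREE" or the currently disabled index
 *      cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 *      # disable L3 index 12 via slot 0 (requires CAP_SYS_ADMIN)
 *      echo 12 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 *      # query the subcache partitioning mask (hex)
 *      cat /sys/devices/system/cpu/cpu0/cache/index3/subcaches
 */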