/* Path: arch/powerpc/platforms/pseries/lparcfg.c */
// SPDX-License-Identifier: GPL-2.0-or-later1/*2* PowerPC64 LPAR Configuration Information Driver3*4* Dave Engebretsen [email protected]5* Copyright (c) 2003 Dave Engebretsen6* Will Schmidt [email protected]7* SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.8* seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.9* Nathan Lynch [email protected]10* Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.11*12* This driver creates a proc file at /proc/ppc64/lparcfg which contains13* keyword - value pairs that specify the configuration of the partition.14*/1516#include <linux/module.h>17#include <linux/types.h>18#include <linux/errno.h>19#include <linux/proc_fs.h>20#include <linux/init.h>21#include <asm/papr-sysparm.h>22#include <linux/seq_file.h>23#include <linux/slab.h>24#include <linux/uaccess.h>25#include <linux/hugetlb.h>26#include <asm/lppaca.h>27#include <asm/hvcall.h>28#include <asm/firmware.h>29#include <asm/rtas.h>30#include <asm/time.h>31#include <asm/vio.h>32#include <asm/mmu.h>33#include <asm/machdep.h>34#include <asm/drmem.h>3536#include "pseries.h"37#include "vas.h" /* pseries_vas_dlpar_cpu() */3839/*40* This isn't a module but we expose that to userspace41* via /proc so leave the definitions here42*/43#define MODULE_VERS "1.9"44#define MODULE_NAME "lparcfg"4546/* #define LPARCFG_DEBUG */4748/*49* Track sum of all purrs across all processors. 
This is used to further50* calculate usage values by different applications51*/52static void cpu_get_purr(void *arg)53{54atomic64_t *sum = arg;5556atomic64_add(mfspr(SPRN_PURR), sum);57}5859static unsigned long get_purr(void)60{61atomic64_t purr = ATOMIC64_INIT(0);6263on_each_cpu(cpu_get_purr, &purr, 1);6465return atomic64_read(&purr);66}6768/*69* Methods used to fetch LPAR data when running on a pSeries platform.70*/7172struct hvcall_ppp_data {73u64 entitlement;74u64 unallocated_entitlement;75u16 group_num;76u16 pool_num;77u8 capped;78u8 weight;79u8 unallocated_weight;80u8 resource_group_index;81u16 active_procs_in_resource_group;82u16 active_procs_in_pool;83u16 active_system_procs;84u16 phys_platform_procs;85u32 max_proc_cap_avail;86u32 entitled_proc_cap_avail;87};8889/*90* H_GET_PPP hcall returns info in 5 parms.91* entitled_capacity,unallocated_capacity,92* aggregation, resource_capability).93*94* R4 = Entitled Processor Capacity Percentage.95* R5 = Unallocated Processor Capacity Percentage.96* R6 (AABBCCDDEEFFGGHH).97* XXXX - reserved (0)98* XXXX - Active Cores in Resource Group99* XXXX - Group Number100* XXXX - Pool Number.101* R7 (IIJJKKLLMMNNOOPP).102* XX - Resource group Number103* XX - bit 0-6 reserved (0). bit 7 is Capped indicator.104* XX - variable processor Capacity Weight105* XX - Unallocated Variable Processor Capacity Weight.106* XXXX - Active processors in Physical Processor Pool.107* XXXX - Processors active on platform.108* R8 (QQQQRRRRRRSSSSSS). 
if ibm,partition-performance-parameters-level >= 1109* XXXX - Physical platform procs allocated to virtualization.110* XXXXXX - Max procs capacity % available to the partitions pool.111* XXXXXX - Entitled procs capacity % available to the112* partitions pool.113*/114static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)115{116unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};117long rc;118119rc = plpar_hcall9(H_GET_PPP, retbuf);120121ppp_data->entitlement = retbuf[0];122ppp_data->unallocated_entitlement = retbuf[1];123124ppp_data->active_procs_in_resource_group = (retbuf[2] >> 4 * 8) & 0xffff;125ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;126ppp_data->pool_num = retbuf[2] & 0xffff;127128ppp_data->resource_group_index = (retbuf[3] >> 7 * 8) & 0xff;129ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;130ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;131ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;132ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;133ppp_data->active_system_procs = retbuf[3] & 0xffff;134135ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;136ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;137ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;138139return rc;140}141142static void show_gpci_data(struct seq_file *m)143{144struct hv_gpci_request_buffer *buf;145unsigned int affinity_score;146long ret;147148buf = kmalloc(sizeof(*buf), GFP_KERNEL);149if (buf == NULL)150return;151152/*153* Show the local LPAR's affinity score.154*155* 0xB1 selects the Affinity_Domain_Info_By_Partition subcall.156* The score is at byte 0xB in the output buffer.157*/158memset(&buf->params, 0, sizeof(buf->params));159buf->params.counter_request = cpu_to_be32(0xB1);160buf->params.starting_index = cpu_to_be32(-1); /* local LPAR */161buf->params.counter_info_version_in = 0x5; /* v5+ for score */162ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(buf),163sizeof(*buf));164if (ret != H_SUCCESS) 
{165pr_debug("hcall failed: H_GET_PERF_COUNTER_INFO: %ld, %x\n",166ret, be32_to_cpu(buf->params.detail_rc));167goto out;168}169affinity_score = buf->bytes[0xB];170seq_printf(m, "partition_affinity_score=%u\n", affinity_score);171out:172kfree(buf);173}174175static long h_pic(unsigned long *pool_idle_time,176unsigned long *num_procs)177{178long rc;179unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = {0};180181rc = plpar_hcall(H_PIC, retbuf);182183if (pool_idle_time)184*pool_idle_time = retbuf[0];185if (num_procs)186*num_procs = retbuf[1];187188return rc;189}190191unsigned long boot_pool_idle_time;192193/*194* parse_ppp_data195* Parse out the data returned from h_get_ppp and h_pic196*/197static void parse_ppp_data(struct seq_file *m)198{199struct hvcall_ppp_data ppp_data;200struct device_node *root;201const __be32 *perf_level;202long rc;203204rc = h_get_ppp(&ppp_data);205if (rc)206return;207208seq_printf(m, "partition_entitled_capacity=%lld\n",209ppp_data.entitlement);210seq_printf(m, "group=%d\n", ppp_data.group_num);211seq_printf(m, "system_active_processors=%d\n",212ppp_data.active_system_procs);213214/* pool related entries are appropriate for shared configs */215if (lppaca_shared_proc()) {216unsigned long pool_idle_time, pool_procs;217218seq_printf(m, "pool=%d\n", ppp_data.pool_num);219220/* report pool_capacity in percentage */221seq_printf(m, "pool_capacity=%d\n",222ppp_data.active_procs_in_pool * 100);223224/* In case h_pic call is not successful, this would result in225* APP values being wrong in tools like lparstat.226*/227228if (h_pic(&pool_idle_time, &pool_procs) == H_SUCCESS) {229seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);230seq_printf(m, "pool_num_procs=%ld\n", pool_procs);231seq_printf(m, "boot_pool_idle_time=%ld\n", boot_pool_idle_time);232}233}234235seq_printf(m, "unallocated_capacity_weight=%d\n",236ppp_data.unallocated_weight);237seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);238seq_printf(m, "capped=%d\n", 
ppp_data.capped);239seq_printf(m, "unallocated_capacity=%lld\n",240ppp_data.unallocated_entitlement);241242if (ppp_data.active_procs_in_resource_group) {243seq_printf(m, "resource_group_number=%d\n",244ppp_data.resource_group_index);245seq_printf(m, "resource_group_active_processors=%d\n",246ppp_data.active_procs_in_resource_group);247}248249/* The last bits of information returned from h_get_ppp are only250* valid if the ibm,partition-performance-parameters-level251* property is >= 1.252*/253root = of_find_node_by_path("/");254if (root) {255perf_level = of_get_property(root,256"ibm,partition-performance-parameters-level",257NULL);258if (perf_level && (be32_to_cpup(perf_level) >= 1)) {259seq_printf(m,260"physical_procs_allocated_to_virtualization=%d\n",261ppp_data.phys_platform_procs);262seq_printf(m, "max_proc_capacity_available=%d\n",263ppp_data.max_proc_cap_avail);264seq_printf(m, "entitled_proc_capacity_available=%d\n",265ppp_data.entitled_proc_cap_avail);266}267268of_node_put(root);269}270}271272/**273* parse_mpp_data274* Parse out data returned from h_get_mpp275*/276static void parse_mpp_data(struct seq_file *m)277{278struct hvcall_mpp_data mpp_data;279int rc;280281rc = h_get_mpp(&mpp_data);282if (rc)283return;284285seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);286287if (mpp_data.mapped_mem != -1)288seq_printf(m, "mapped_entitled_memory=%ld\n",289mpp_data.mapped_mem);290291seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);292seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);293294seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);295seq_printf(m, "unallocated_entitled_memory_weight=%d\n",296mpp_data.unallocated_mem_weight);297seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",298mpp_data.unallocated_entitlement);299300if (mpp_data.pool_size != -1)301seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",302mpp_data.pool_size);303304seq_printf(m, 
"entitled_memory_loan_request=%ld\n",305mpp_data.loan_request);306307seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);308}309310/**311* parse_mpp_x_data312* Parse out data returned from h_get_mpp_x313*/314static void parse_mpp_x_data(struct seq_file *m)315{316struct hvcall_mpp_x_data mpp_x_data;317318if (!firmware_has_feature(FW_FEATURE_XCMO))319return;320if (h_get_mpp_x(&mpp_x_data))321return;322323seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);324325if (mpp_x_data.pool_coalesced_bytes)326seq_printf(m, "pool_coalesced_bytes=%ld\n",327mpp_x_data.pool_coalesced_bytes);328if (mpp_x_data.pool_purr_cycles)329seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);330if (mpp_x_data.pool_spurr_cycles)331seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);332}333334/*335* Read the lpar name using the RTAS ibm,get-system-parameter call.336*337* The name read through this call is updated if changes are made by the end338* user on the hypervisor side.339*340* Some hypervisor (like Qemu) may not provide this value. 
In that case, a non341* null value is returned.342*/343static int read_rtas_lpar_name(struct seq_file *m)344{345struct papr_sysparm_buf *buf;346int err;347348buf = papr_sysparm_buf_alloc();349if (!buf)350return -ENOMEM;351352err = papr_sysparm_get(PAPR_SYSPARM_LPAR_NAME, buf);353if (!err)354seq_printf(m, "partition_name=%s\n", buf->val);355356papr_sysparm_buf_free(buf);357return err;358}359360/*361* Read the LPAR name from the Device Tree.362*363* The value read in the DT is not updated if the end-user is touching the LPAR364* name on the hypervisor side.365*/366static int read_dt_lpar_name(struct seq_file *m)367{368struct device_node *root = of_find_node_by_path("/");369const char *name;370int ret;371372ret = of_property_read_string(root, "ibm,partition-name", &name);373of_node_put(root);374if (ret)375return -ENOENT;376377seq_printf(m, "partition_name=%s\n", name);378return 0;379}380381static void read_lpar_name(struct seq_file *m)382{383if (read_rtas_lpar_name(m))384read_dt_lpar_name(m);385}386387#define SPLPAR_MAXLENGTH 1026*(sizeof(char))388389/*390* parse_system_parameter_string()391* Retrieve the potential_processors, max_entitled_capacity and friends392* through the get-system-parameter rtas call. 
Replace keyword strings as393* necessary.394*/395static void parse_system_parameter_string(struct seq_file *m)396{397struct papr_sysparm_buf *buf;398399buf = papr_sysparm_buf_alloc();400if (!buf)401return;402403if (papr_sysparm_get(PAPR_SYSPARM_SHARED_PROC_LPAR_ATTRS, buf)) {404goto out_free;405} else {406const char *local_buffer;407int splpar_strlen;408int idx, w_idx;409char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);410411if (!workbuffer)412goto out_free;413414splpar_strlen = be16_to_cpu(buf->len);415local_buffer = buf->val;416417w_idx = 0;418idx = 0;419while ((*local_buffer) && (idx < splpar_strlen)) {420workbuffer[w_idx++] = local_buffer[idx++];421if ((local_buffer[idx] == ',')422|| (local_buffer[idx] == '\0')) {423workbuffer[w_idx] = '\0';424if (w_idx) {425/* avoid the empty string */426seq_printf(m, "%s\n", workbuffer);427}428memset(workbuffer, 0, SPLPAR_MAXLENGTH);429idx++; /* skip the comma */430w_idx = 0;431} else if (local_buffer[idx] == '=') {432/* code here to replace workbuffer contents433with different keyword strings */434if (0 == strcmp(workbuffer, "MaxEntCap")) {435strcpy(workbuffer,436"partition_max_entitled_capacity");437w_idx = strlen(workbuffer);438}439if (0 == strcmp(workbuffer, "MaxPlatProcs")) {440strcpy(workbuffer,441"system_potential_processors");442w_idx = strlen(workbuffer);443}444}445}446kfree(workbuffer);447local_buffer -= 2; /* back up over strlen value */448}449out_free:450papr_sysparm_buf_free(buf);451}452453/* Return the number of processors in the system.454* This function reads through the device tree and counts455* the virtual processors, this does not include threads.456*/457static int lparcfg_count_active_processors(void)458{459struct device_node *cpus_dn;460int count = 0;461462for_each_node_by_type(cpus_dn, "cpu") {463#ifdef LPARCFG_DEBUG464printk(KERN_ERR "cpus_dn %p\n", cpus_dn);465#endif466count++;467}468return count;469}470471static void pseries_cmo_data(struct seq_file *m)472{473int cpu;474unsigned long 
cmo_faults = 0;475unsigned long cmo_fault_time = 0;476477seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));478479if (!firmware_has_feature(FW_FEATURE_CMO))480return;481482for_each_possible_cpu(cpu) {483cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);484cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);485}486487seq_printf(m, "cmo_faults=%lu\n", cmo_faults);488seq_printf(m, "cmo_fault_time_usec=%lu\n",489cmo_fault_time / tb_ticks_per_usec);490seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());491seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());492seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());493}494495static void splpar_dispatch_data(struct seq_file *m)496{497int cpu;498unsigned long dispatches = 0;499unsigned long dispatch_dispersions = 0;500501for_each_possible_cpu(cpu) {502dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);503dispatch_dispersions +=504be32_to_cpu(lppaca_of(cpu).dispersion_count);505}506507seq_printf(m, "dispatches=%lu\n", dispatches);508seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);509}510511static void parse_em_data(struct seq_file *m)512{513unsigned long retbuf[PLPAR_HCALL_BUFSIZE];514515if (firmware_has_feature(FW_FEATURE_LPAR) &&516plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)517seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);518}519520static void maxmem_data(struct seq_file *m)521{522unsigned long maxmem = 0;523524maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;525maxmem += hugetlb_total_pages() * PAGE_SIZE;526527seq_printf(m, "MaxMem=%lu\n", maxmem);528}529530static int pseries_lparcfg_data(struct seq_file *m, void *v)531{532int partition_potential_processors;533int partition_active_processors;534struct device_node *rtas_node;535const __be32 *lrdrp = NULL;536537rtas_node = of_find_node_by_path("/rtas");538if (rtas_node)539lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);540541if (lrdrp == NULL) 
{542partition_potential_processors = num_possible_cpus();543} else {544partition_potential_processors = be32_to_cpup(lrdrp + 4);545}546of_node_put(rtas_node);547548partition_active_processors = lparcfg_count_active_processors();549550if (firmware_has_feature(FW_FEATURE_SPLPAR)) {551/* this call handles the ibm,get-system-parameter contents */552read_lpar_name(m);553parse_system_parameter_string(m);554parse_ppp_data(m);555parse_mpp_data(m);556parse_mpp_x_data(m);557pseries_cmo_data(m);558splpar_dispatch_data(m);559560seq_printf(m, "purr=%ld\n", get_purr());561seq_printf(m, "tbr=%ld\n", mftb());562} else { /* non SPLPAR case */563564seq_printf(m, "system_active_processors=%d\n",565partition_active_processors);566567seq_printf(m, "system_potential_processors=%d\n",568partition_potential_processors);569570seq_printf(m, "partition_max_entitled_capacity=%d\n",571partition_potential_processors * 100);572573seq_printf(m, "partition_entitled_capacity=%d\n",574partition_active_processors * 100);575}576577show_gpci_data(m);578579seq_printf(m, "partition_active_processors=%d\n",580partition_active_processors);581582seq_printf(m, "partition_potential_processors=%d\n",583partition_potential_processors);584585seq_printf(m, "shared_processor_mode=%d\n",586lppaca_shared_proc());587588#ifdef CONFIG_PPC_64S_HASH_MMU589if (!radix_enabled())590seq_printf(m, "slb_size=%d\n", mmu_slb_size);591#endif592parse_em_data(m);593maxmem_data(m);594595seq_printf(m, "security_flavor=%u\n", pseries_security_flavor);596597return 0;598}599600static ssize_t update_ppp(u64 *entitlement, u8 *weight)601{602struct hvcall_ppp_data ppp_data;603u8 new_weight;604u64 new_entitled;605ssize_t retval;606607/* Get our current parameters */608retval = h_get_ppp(&ppp_data);609if (retval)610return retval;611612if (entitlement) {613new_weight = ppp_data.weight;614new_entitled = *entitlement;615} else if (weight) {616new_weight = *weight;617new_entitled = ppp_data.entitlement;618} else619return 
-EINVAL;620621pr_debug("%s: current_entitled = %llu, current_weight = %u\n",622__func__, ppp_data.entitlement, ppp_data.weight);623624pr_debug("%s: new_entitled = %llu, new_weight = %u\n",625__func__, new_entitled, new_weight);626627retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);628return retval;629}630631/**632* update_mpp633*634* Update the memory entitlement and weight for the partition. Caller must635* specify either a new entitlement or weight, not both, to be updated636* since the h_set_mpp call takes both entitlement and weight as parameters.637*/638static ssize_t update_mpp(u64 *entitlement, u8 *weight)639{640struct hvcall_mpp_data mpp_data;641u64 new_entitled;642u8 new_weight;643ssize_t rc;644645if (entitlement) {646/* Check with vio to ensure the new memory entitlement647* can be handled.648*/649rc = vio_cmo_entitlement_update(*entitlement);650if (rc)651return rc;652}653654rc = h_get_mpp(&mpp_data);655if (rc)656return rc;657658if (entitlement) {659new_weight = mpp_data.mem_weight;660new_entitled = *entitlement;661} else if (weight) {662new_weight = *weight;663new_entitled = mpp_data.entitled_mem;664} else665return -EINVAL;666667pr_debug("%s: current_entitled = %lu, current_weight = %u\n",668__func__, mpp_data.entitled_mem, mpp_data.mem_weight);669670pr_debug("%s: new_entitled = %llu, new_weight = %u\n",671__func__, new_entitled, new_weight);672673rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);674return rc;675}676677/*678* Interface for changing system parameters (variable capacity weight679* and entitled capacity). Format of input is "param_name=value";680* anything after value is ignored. Valid parameters at this time are681* "partition_entitled_capacity" and "capacity_weight". 
We use682* H_SET_PPP to alter parameters.683*684* This function should be invoked only on systems with685* FW_FEATURE_SPLPAR.686*/687static ssize_t lparcfg_write(struct file *file, const char __user * buf,688size_t count, loff_t * off)689{690char kbuf[64];691char *tmp;692u64 new_entitled, *new_entitled_ptr = &new_entitled;693u8 new_weight, *new_weight_ptr = &new_weight;694ssize_t retval;695696if (!firmware_has_feature(FW_FEATURE_SPLPAR))697return -EINVAL;698699if (count > sizeof(kbuf))700return -EINVAL;701702if (copy_from_user(kbuf, buf, count))703return -EFAULT;704705kbuf[count - 1] = '\0';706tmp = strchr(kbuf, '=');707if (!tmp)708return -EINVAL;709710*tmp++ = '\0';711712if (!strcmp(kbuf, "partition_entitled_capacity")) {713char *endp;714*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);715if (endp == tmp)716return -EINVAL;717718retval = update_ppp(new_entitled_ptr, NULL);719720if (retval == H_SUCCESS || retval == H_CONSTRAINED) {721/*722* The hypervisor assigns VAS resources based723* on entitled capacity for shared mode.724* Reconfig VAS windows based on DLPAR CPU events.725*/726if (pseries_vas_dlpar_cpu() != 0)727retval = H_HARDWARE;728}729} else if (!strcmp(kbuf, "capacity_weight")) {730char *endp;731*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);732if (endp == tmp)733return -EINVAL;734735retval = update_ppp(NULL, new_weight_ptr);736} else if (!strcmp(kbuf, "entitled_memory")) {737char *endp;738*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);739if (endp == tmp)740return -EINVAL;741742retval = update_mpp(new_entitled_ptr, NULL);743} else if (!strcmp(kbuf, "entitled_memory_weight")) {744char *endp;745*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);746if (endp == tmp)747return -EINVAL;748749retval = update_mpp(NULL, new_weight_ptr);750} else751return -EINVAL;752753if (retval == H_SUCCESS || retval == H_CONSTRAINED) {754retval = count;755} else if (retval == H_BUSY) {756retval = -EBUSY;757} else if (retval == H_HARDWARE) {758retval = 
-EIO;759} else if (retval == H_PARAMETER) {760retval = -EINVAL;761}762763return retval;764}765766static int lparcfg_data(struct seq_file *m, void *v)767{768struct device_node *rootdn;769const char *model = "";770const char *system_id = "";771const char *tmp;772const __be32 *lp_index_ptr;773unsigned int lp_index = 0;774775seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);776777rootdn = of_find_node_by_path("/");778if (rootdn) {779tmp = of_get_property(rootdn, "model", NULL);780if (tmp)781model = tmp;782tmp = of_get_property(rootdn, "system-id", NULL);783if (tmp)784system_id = tmp;785lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",786NULL);787if (lp_index_ptr)788lp_index = be32_to_cpup(lp_index_ptr);789of_node_put(rootdn);790}791seq_printf(m, "serial_number=%s\n", system_id);792seq_printf(m, "system_type=%s\n", model);793seq_printf(m, "partition_id=%d\n", (int)lp_index);794795return pseries_lparcfg_data(m, v);796}797798static int lparcfg_open(struct inode *inode, struct file *file)799{800return single_open(file, lparcfg_data, NULL);801}802803static const struct proc_ops lparcfg_proc_ops = {804.proc_read = seq_read,805.proc_write = lparcfg_write,806.proc_open = lparcfg_open,807.proc_release = single_release,808.proc_lseek = seq_lseek,809};810811static int __init lparcfg_init(void)812{813umode_t mode = 0444;814long retval;815816/* Allow writing if we have FW_FEATURE_SPLPAR */817if (firmware_has_feature(FW_FEATURE_SPLPAR))818mode |= 0200;819820if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_proc_ops)) {821printk(KERN_ERR "Failed to create powerpc/lparcfg\n");822return -EIO;823}824825/* If this call fails, it would result in APP values826* being wrong for since boot reports of lparstat827*/828retval = h_pic(&boot_pool_idle_time, NULL);829830if (retval != H_SUCCESS)831pr_debug("H_PIC failed during lparcfg init retval: %ld\n",832retval);833834return 0;835}836machine_device_initcall(pseries, lparcfg_init);837838839