Path: arch/powerpc/platforms/pseries/dtl.c
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>

#include "plpar_wrappers.h"

struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;


/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < vpa->dtl_idx)
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu, __pa(dtl->buf));
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return lppaca_of(dtl->cpu).dtl_idx;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	/* dtl_cache is set up during boot; bail out if that failed */
	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
		       __func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	/* if entries we haven't read yet have been overwritten, skip
	 * ahead to the oldest entry still present in the buffer */
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	/* clamp the request to the number of entries actually available */
	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
		       __func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
arch_initcall(dtl_init);
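
For context, here is what consuming one of these per-cpu files looks like from userspace: each read() on /sys/kernel/debug/powerpc/dtl/cpu-N must request a multiple of sizeof(struct dtl_entry), and dtl_file_read() returns only whole entries. The following is a minimal sketch, not part of this file: it assumes debugfs is mounted at /sys/kernel/debug, and the struct below is hand-copied from the 48-byte dispatch trace entry layout in asm/lppaca.h, which is not a stable userspace ABI.

/* Hypothetical userspace reader for a per-cpu DTL debugfs file.
 * Field values are raw hypervisor-format (big-endian) data; this
 * sketch assumes it runs on the same big-endian pseries system. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* illustrative copy of the 48-byte entry layout from asm/lppaca.h */
struct dtl_entry {
	uint8_t  dispatch_reason;
	uint8_t  preempt_reason;
	uint16_t processor_id;
	uint32_t enqueue_to_dispatch_time;
	uint32_t ready_to_enqueue_time;
	uint32_t waiting_to_ready_time;
	uint64_t timebase;
	uint64_t fault_addr;
	uint64_t srr0;
	uint64_t srr1;
};

int main(void)
{
	struct dtl_entry entries[16];
	ssize_t n;
	int fd, i;

	/* opening the file allocates the buffer and starts logging */
	fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* reads must be a multiple of sizeof(struct dtl_entry);
	 * a return of 0 means no new entries were available */
	n = read(fd, entries, sizeof(entries));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}

	for (i = 0; i < n / (ssize_t)sizeof(struct dtl_entry); i++)
		printf("tb=%llu dispatch=%u preempt=%u\n",
		       (unsigned long long)entries[i].timebase,
		       entries[i].dispatch_reason,
		       entries[i].preempt_reason);

	close(fd);
	return 0;
}

Note that open() itself registers the per-cpu buffer (via dtl_enable() above), so logging only runs while the file is held open, and a second concurrent open of the same cpu's file fails with -EBUSY.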