GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/s390/appldata/appldata_base.c
/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author: Gerald Schaefer <[email protected]>
 */

#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "appldata.h"


#define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
					   sampling interval in
					   milliseconds */

#define TOD_MICRO	0x01000		/* nr. of TOD clock units
					   for 1 microsecond */
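/*
 * Note: bit 51 of the s390 TOD clock increments once per microsecond,
 * so one microsecond corresponds to 2^12 = 0x1000 of the clock's
 * low-order units.
 */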

static struct platform_device *appldata_pdev;

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_interval_handler,
	},
	{ },
};

static struct ctl_table appldata_dir_table[] = {
	{
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ },
};

/*
 * Timer
 */
static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);
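/*
 * Each online CPU runs its own virtual (CPU-time) timer.  When a timer
 * fires it decrements appldata_expire_count; the CPU whose decrement
 * reaches zero resets the counter to num_online_cpus() and queues the
 * data-gathering work, so one record is produced per sampling round.
 */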

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
static int appldata_timer_suspended = 0;

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);


/*
 * Ops list
 */
static DEFINE_MUTEX(appldata_ops_mutex);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data)
{
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, (struct work_struct *) data);
	}
}

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(struct work_struct *work)
{
	struct list_head *lh;
	struct appldata_ops *ops;

	get_online_cpus();
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	put_online_cpus();
}

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
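/*
 * Note: the product ID fields below are EBCDIC strings, as expected by
 * z/VM; e.g. 0xD3 0xC9 0xD5 0xE4 0xE7 0xD2 0xD9 spells "LINUXKR".
 */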
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
		  u16 length, char *mod_lvl)
{
	struct appldata_product_id id = {
		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
		.prod_fn    = 0xD5D3,			/* "NL" */
		.version_nr = 0xF2F6,			/* "26" */
		.release_nr = 0xF0F1,			/* "01" */
	};

	id.record_nr = record_nr;
	id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
	return appldata_asm(&id, function, (void *) buffer, length);
}
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

/*
 * __appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_single()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;
	mod_virt_timer_periodic(args->timer, args->expires);
}

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
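/*
 * Note: the sampling interval is divided among the online CPUs
 * (appldata_interval / num_online_cpus() per CPU).  Virtual timers
 * consume CPU time rather than wall time, so with every CPU armed this
 * way roughly one full interval elapses between completed sampling
 * rounds.
 */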
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			smp_call_function_single(i, add_virt_timer_periodic,
						 &per_cpu(appldata_timer, i),
						 1);
		}
		appldata_timer_active = 1;
		break;
	case APPLDATA_DEL_TIMER:
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		atomic_set(&appldata_expire_count, num_online_cpus());
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;
			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
						 &args, 1);
		}
	}
}

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(ctl_table *ctl, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}
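/*
 * Example usage from user space (paths as created by the sysctl tables
 * above):
 *   echo 1   > /proc/sys/appldata/timer	(start sampling)
 *   echo 500 > /proc/sys/appldata/interval	(sample every 500 ms)
 */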

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(ctl_table *ctl, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, interval;
	char buf[16];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, "%i\n", appldata_interval);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
		return -EFAULT;
	}
	interval = 0;
	sscanf(buf, "%i", &interval);
	if (interval <= 0)
		return -EINVAL;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
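/*
 * Note: ctl->data is only dereferenced after verifying that ctl really
 * belongs to a registered ops (the &tmp_ops->ctl_table[2] == ctl check
 * below), and the owning module is pinned with try_module_get() while
 * the handler runs.
 */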
static int
appldata_generic_handler(ctl_table *ctl, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	int rc, len, found;
	char buf[2];
	struct list_head *lh;

	found = 0;
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	// protect this function
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	mutex_unlock(&appldata_ops_mutex);

	if (!*lenp || *ppos) {
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		len = sprintf(buf, ops->active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	mutex_lock(&appldata_ops_mutex);
	if ((buf[0] == '1') && (ops->active == 0)) {
		// protect work queue callback
		if (!try_module_get(ops->owner)) {
			mutex_unlock(&appldata_ops_mutex);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			pr_err("Starting the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
			module_put(ops->owner);
		} else
			ops->active = 1;
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0)
			pr_err("Stopping the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
		module_put(ops->owner);
	}
	mutex_unlock(&appldata_ops_mutex);
out:
	*lenp = len;
	*ppos += len;
	module_put(ops->owner);
	return 0;
}

/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
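/*
 * Note: four ctl_table entries are allocated below: [0] is the
 * "appldata" directory whose .child points at [2], the module's own
 * entry; [1] and [3] stay zeroed (kzalloc) and terminate the two lists.
 */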
int appldata_register_ops(struct appldata_ops *ops)
{
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;

	ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;

	mutex_lock(&appldata_ops_mutex);
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);

	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen   = 0;
	ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child    = &ops->ctl_table[2];

	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
	if (!ops->sysctl_header)
		goto out;
	return 0;
out:
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	kfree(ops->ctl_table);
	return -ENOMEM;
}

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}
/********************** module-ops management <END> **************************/


/**************************** suspend / resume *******************************/
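/*
 * Hibernation support: freeze stops all active DIAG data collections
 * and deletes the per-CPU timers; thaw/restore re-initializes each
 * active record and re-arms the timers and collections.
 */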
static int appldata_freeze(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (appldata_timer_active) {
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
		appldata_timer_suspended = 1;
	}
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0)
				pr_err("Stopping the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_restore(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (appldata_timer_suspended) {
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
		appldata_timer_suspended = 0;
	}
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);	// init record
			rc = appldata_diag(ops->record_nr,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0) {
				pr_err("Starting the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
			}
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}

static const struct dev_pm_ops appldata_pm_ops = {
	.freeze		= appldata_freeze,
	.thaw		= appldata_thaw,
	.restore	= appldata_restore,
};

static struct platform_driver appldata_pdrv = {
	.driver = {
		.name	= "appldata",
		.owner	= THIS_MODULE,
		.pm	= &appldata_pm_ops,
	},
};
/************************* suspend / resume <END> ****************************/


/******************************* init / exit *********************************/

static void __cpuinit appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	per_cpu(appldata_timer, cpu).data = (unsigned long)
		&appldata_work;
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static void __cpuinit appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, &appldata_work);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
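/*
 * Note: if the CPU going offline was the last one missing from the
 * current sampling round, the pending round is completed above so that
 * appldata_expire_count stays consistent with num_online_cpus().
 */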

static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
					 unsigned long action,
					 void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		appldata_online_cpu((long) hcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		appldata_offline_cpu((long) hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int i, rc;

	rc = platform_driver_register(&appldata_pdrv);
	if (rc)
		return rc;

	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
							0);
	if (IS_ERR(appldata_pdev)) {
		rc = PTR_ERR(appldata_pdev);
		goto out_driver;
	}
	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		rc = -ENOMEM;
		goto out_device;
	}

	get_online_cpus();
	for_each_online_cpu(i)
		appldata_online_cpu(i);
	put_online_cpus();

	/* Register cpu hotplug notifier */
	register_hotcpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
	return 0;

out_device:
	platform_device_unregister(appldata_pdev);
out_driver:
	platform_driver_unregister(&appldata_pdrv);
	return rc;
}

__initcall(appldata_init);

/**************************** init / exit <END> ******************************/

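/*
 * appldata_register_ops()/appldata_unregister_ops() and appldata_diag()
 * form the interface used by the data gathering modules; the remaining
 * exports are kernel statistics those modules sample.
 */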
EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);