GitHub Repository: torvalds/linux
Path: blob/master/net/iucv/iucv.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original source:
 *		Alan Altmark ([email protected]) Sept. 2000
 *		Xenia Tkatschow ([email protected])
 *	2Gb awareness and general cleanup:
 *		Fritz Elfert ([email protected], [email protected])
 *	Rewritten for af_iucv:
 *		Martin Schwidefsky <[email protected]>
 *	PM functions:
 *		Ursula Braun ([email protected])
 *
 * Documentation used:
 *	The original source
 *	CP Programming Service, IBM document # SC24-5760
 */

#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

static int iucv_bus_match(struct device *dev, const struct device_driver *drv)
{
	return 0;
}

const struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
};
EXPORT_SYMBOL(iucv_bus);

static struct device *iucv_root;

static void iucv_release_device(struct device *device)
{
	kfree(device);
}

struct device *iucv_alloc_device(const struct attribute_group **attrs,
				 struct device_driver *driver,
				 void *priv, const char *fmt, ...)
{
	struct device *dev;
	va_list vargs;
	char buf[20];
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		goto out_error;
	va_start(vargs, fmt);
	vsnprintf(buf, sizeof(buf), fmt, vargs);
	rc = dev_set_name(dev, "%s", buf);
	va_end(vargs);
	if (rc)
		goto out_error;
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->driver = driver;
	dev->groups = attrs;
	dev->release = iucv_release_device;
	dev_set_drvdata(dev, priv);
	return dev;

out_error:
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(iucv_alloc_device);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 res2[9];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: array of pointers to iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u16 ipmsglim;
	u16 res1;
	u8 ipvmid[8];
	u8 ipuser[16];
	u8 iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8 iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	dma32_t ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	dma32_t ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	dma32_t ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u8 ipaudit[3];
	u8 res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8 ipmask;
	u8 res1[2];
	u8 iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/**
 * __iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
	unsigned long reg1 = virt_to_phys(parm);
	int cc;

	asm volatile(
		"	lgr	0,%[reg0]\n"
		"	lgr	1,%[reg1]\n"
		"	.long	0xb2f01000\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), "+m" (*parm)
		: [reg0] "d" ((unsigned long)command),
		  [reg1] "d" (reg1)
		: "cc", "0", "1");
	return cc;
}

static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	int ccode;

	ccode = __iucv_call_b2f0(command, parm);
	return ccode == 1 ? parm->ctrl.iprcode : ccode;
}
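
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. Callers of iucv_call_b2f0() in this file follow a common
 * pattern: pick the per-cpu parameter block, clear it, fill in the
 * command-specific fields and issue the call, e.g.:
 *
 *	union iucv_param *parm = iucv_param[smp_processor_id()];
 *
 *	memset(parm, 0, sizeof(union iucv_param));
 *	parm->ctrl.ippathid = pathid;
 *	rc = iucv_call_b2f0(IUCV_SEVER, parm);
 *
 * A non-zero rc is either the condition code or, for cc == 1, the
 * IPRCODE return code from CP.
 */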

/*
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns the maximum number of connections or -EPERM if IUCV is not
 * available.
 */
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
	unsigned long reg1 = virt_to_phys(param);
	int cc;

	asm volatile (
		"	lghi	0,%[cmd]\n"
		"	lgr	1,%[reg1]\n"
		"	.long	0xb2f01000\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"	lgr	%[reg1],1\n"
		: [cc] "=&d" (cc), [reg1] "+&d" (reg1)
		: [cmd] "K" (IUCV_QUERY)
		: "cc", "0", "1");
	*max_pathid = reg1;
	return cc;
}

static int iucv_query_maxconn(void)
{
	unsigned long max_pathid;
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
	if (!param)
		return -ENOMEM;
	ccode = __iucv_query_maxconn(param, &max_pathid);
	if (ccode == 0)
		iucv_max_pathid = max_pathid;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow nonpriority message pending interrupts
	 * 0x40 - Flag to allow priority message pending interrupts
	 * 0x20 - Flag to allow nonpriority message completion interrupts
	 * 0x10 - Flag to allow priority message completion interrupts
	 * 0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow pending connections interrupts
	 * 0x40 - Flag to allow connection complete interrupts
	 * 0x20 - Flag to allow connection severed interrupts
	 * 0x10 - Flag to allow connection quiesced interrupts
	 * 0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/*
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	cpus_read_unlock();
}

/*
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	static cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/*
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	cpus_read_lock();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	cpus_read_unlock();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	cpus_read_unlock();
	return rc;
}

/*
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	cpus_read_lock();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	cpus_read_unlock();
}

static int iucv_cpu_dead(unsigned int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
	return 0;
}

static int iucv_cpu_prepare(unsigned int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					   GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	iucv_cpu_dead(cpu);
	return -ENOMEM;
}

static int iucv_cpu_online(unsigned int cpu)
{
	if (!iucv_path_table)
		return 0;
	iucv_declare_cpu(NULL);
	return 0;
}

static int iucv_cpu_down_prep(unsigned int cpu)
{
	cpumask_var_t cpumask;
	int ret = 0;

	if (!iucv_path_table)
		return 0;

	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpumask, &iucv_buffer_cpumask);
	cpumask_clear_cpu(cpu, cpumask);
	if (cpumask_empty(cpumask)) {
		/* Can't offline last IUCV enabled cpu. */
		ret = -EINVAL;
		goto __free_cpumask;
	}

	iucv_retrieve_cpu(NULL);
	if (!cpumask_empty(&iucv_irq_cpumask))
		goto __free_cpumask;

	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
				 iucv_allow_cpu, NULL, 1);

__free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}

/**
 * iucv_sever_pathid
 * @pathid: path identification number.
 * @userdata: 16 bytes of user data.
 *
 * Sever an iucv path to free up the pathid. Used internally.
 */
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on an iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/**
 * iucv_register:
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Registers a driver with IUCV.
 *
 * Returns 0 on success, -ENOSYS if IUCV is not available, -ENOMEM if the
 * memory allocation for the pathid table failed, or -EIO if
 * IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);
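
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. A typical user fills in a struct iucv_handler (declared in
 * <net/iucv/iucv.h>) with its callbacks and registers it once at module
 * init; the callback names here are hypothetical:
 *
 *	static struct iucv_handler example_handler = {
 *		.path_pending	 = example_path_pending,
 *		.path_complete	 = example_path_complete,
 *		.path_severed	 = example_path_severed,
 *		.message_pending = example_message_pending,
 *	};
 *
 *	rc = iucv_register(&example_handler, 0);
 *	if (rc)
 *		return rc;
 *
 * Passing smp == 0 declares that the handler cannot deal with out of
 * order messages, which restricts interrupt delivery to a single cpu.
 */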

/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregister driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);

static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	cpus_read_lock();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	cpus_read_unlock();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 *userdata, void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);
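
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. Accepting usually happens from the handler's path_pending
 * callback; the names are hypothetical:
 *
 *	static int example_path_pending(struct iucv_path *path,
 *					u8 *ipvmid, u8 *ipuser)
 *	{
 *		path->msglim = 16;	(may lower the offered message limit)
 *		path->flags = 0;
 *		return iucv_path_accept(path, &example_handler, NULL, NULL);
 *	}
 *
 * Returning non-zero from path_pending makes the base code offer the
 * path to the next handler, or sever it if nobody wants it.
 */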

/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 *userid, u8 *system, u8 *userdata,
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);
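
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. Connecting to a peer allocates a path first (helpers from
 * <net/iucv/iucv.h>); "LNXPEER " is a hypothetical 8-character z/VM
 * user id, blank padded:
 *
 *	struct iucv_path *path;
 *	int rc;
 *
 *	path = iucv_path_alloc(16, IUCV_IPRMDATA, GFP_KERNEL);
 *	if (!path)
 *		return -ENOMEM;
 *	rc = iucv_path_connect(path, &example_handler, (u8 *)"LNXPEER ",
 *			       NULL, NULL, NULL);
 *	if (rc)
 *		iucv_path_free(path);
 *
 * The path becomes usable only once the handler's path_complete
 * callback has run.
 */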

/**
 * iucv_path_quiesce:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking the iucv_resume function.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/**
 * iucv_path_resume:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/**
 * iucv_path_sever
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function terminates an IUCV path.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

/**
 * iucv_message_purge
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Cancels a message you have sent.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/**
 * iucv_message_receive_iprmdata
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: on return, the number of bytes by which @size and the
 *	      8-byte message length differ (if non-NULL)
 *
 * Internal function used by iucv_message_receive and __iucv_message_receive
 * to receive RMDATA data stored in a struct iucv_message.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer,
					 size_t size, size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			copy = min_t(size_t, size, array->length);
			memcpy(dma32_to_virt(array->address), rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}

/**
 * __iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: on return, the number of bytes in the buffer that were not
 *	      filled by the message (if non-NULL)
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask))
		return -EIO;

	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_dma32(buffer);
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: on return, the number of bytes in the buffer that were not
 *	      filled by the message (if non-NULL)
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);
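
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. Receiving is normally driven from the message_pending
 * callback; the buffer handling here is hypothetical:
 *
 *	static void example_message_pending(struct iucv_path *path,
 *					    struct iucv_message *msg)
 *	{
 *		u8 buf[64];
 *		size_t residual;
 *		int rc;
 *
 *		rc = iucv_message_receive(path, msg, 0, buf,
 *					  min_t(u32, msg->length, sizeof(buf)),
 *					  &residual);
 *	}
 *
 * For messages sent with IUCV_IPRMDATA the payload is already in
 * msg->rmmsg and no CP call is made.
 */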

/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must completely identify the message to which you wish to reply, i.e.
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = virt_to_dma32(reply);
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);

/**
 * __iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. The data to be
 * transmitted is in a buffer. This is a one-way message; the receiver
 * will not reply to it.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ipbfadr1 = virt_to_dma32(buffer);
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. The data to be
 * transmitted is in a buffer. This is a one-way message; the receiver
 * will not reply to it.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);
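
/*
 * Editor's note: illustrative sketch only, not part of the original
 * source. A one-way send on an established path; the tag and class
 * values are hypothetical:
 *
 *	struct iucv_message msg = {
 *		.class	= 0,
 *		.tag	= 0x42,
 *	};
 *	int rc;
 *
 *	rc = iucv_message_send(path, &msg, 0, 0, data, data_len);
 *
 * With IUCV_IPRMDATA in flags the first 8 bytes of the buffer travel in
 * the parameter list itself, avoiding a separate buffer fetch by CP.
 */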

/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @answer: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 * @residual: ignored
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. The receiver of the send is expected to
 * reply to the message and a buffer is provided into which IUCV moves
 * the reply to this message.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = virt_to_dma32(answer);
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = virt_to_dma32(buffer);
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = virt_to_dma32(answer);
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);

struct iucv_path_pending {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u16 ipmsglim;
	u16 res1;
	u8 ipvmid[8];
	u8 ipuser[16];
	u32 res3;
	u8 ippollfg;
	u8 res4[3];
} __packed;

/**
 * iucv_path_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process connection pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	EBCASC(ipp->ipvmid, 8);

	/* Call the registered handlers until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	iucv_sever_pathid(ipp->ippathid, error);
}

struct iucv_path_complete {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u16 ipmsglim;
	u16 res1;
	u8 res2[8];
	u8 ipuser[16];
	u32 res3;
	u8 ippollfg;
	u8 res4[3];
} __packed;

/**
 * iucv_path_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process connection complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	if (path)
		path->flags = ipc->ipflags1;
	if (path && path->handler && path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}

struct iucv_path_severed {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_severed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection severed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	if (!path || !path->handler)	/* Already severed */
		return;
	if (path->handler->path_severed)
		path->handler->path_severed(path, ips->ipuser);
	else {
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del(&path->list);
		iucv_path_free(path);
	}
}

struct iucv_path_quiesced {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_quiesced
 * @data: Pointer to external interrupt buffer
 *
 * Process connection quiesced work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	if (path && path->handler && path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}

struct iucv_path_resumed {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_resumed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection resumed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	if (path && path->handler && path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}

struct iucv_message_complete {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 ipmsgid;
	u32 ipaudit;
	u8 iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	u8 ippollfg;
	u8 res2[3];
} __packed;

/**
 * iucv_message_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process message complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_message_complete(struct iucv_irq_data *data)
{
	struct iucv_message_complete *imc = (void *) data;
	struct iucv_path *path = iucv_path_table[imc->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_complete) {
		msg.flags = imc->ipflags1;
		msg.id = imc->ipmsgid;
		msg.audit = imc->ipaudit;
		memcpy(msg.rmmsg, imc->iprmmsg, 8);
		msg.class = imc->ipsrccls;
		msg.tag = imc->ipmsgtag;
		msg.length = imc->ipbfln2f;
		path->handler->message_complete(path, &msg);
	}
}

struct iucv_message_pending {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	struct {
		union {
			u32 iprmmsg1_u32;
			u8 iprmmsg1[4];
		} ln1msg1;
		union {
			u32 ipbfln1f;
			u8 iprmmsg2[4];
		} ln1msg2;
	} rmmsg;
	u32 res1[3];
	u32 ipbfln2f;
	u8 ippollfg;
	u8 res2[3];
} __packed;

/**
 * iucv_message_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process message pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
static void iucv_message_pending(struct iucv_irq_data *data)
{
	struct iucv_message_pending *imp = (void *) data;
	struct iucv_path *path = iucv_path_table[imp->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_pending) {
		msg.flags = imp->ipflags1;
		msg.id = imp->ipmsgid;
		msg.class = imp->iptrgcls;
		if (imp->ipflags1 & IUCV_IPRMDATA) {
			memcpy(msg.rmmsg, &imp->rmmsg, 8);
			msg.length = 8;
		} else
			msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
		msg.reply_size = imp->ipbfln2f;
		path->handler->message_pending(path, &msg);
	}
}

/*
 * iucv_tasklet_fn:
 *
 * This tasklet loops over the queue of irq buffers created by
 * iucv_external_interrupt, calls the appropriate action handler
 * and then frees the buffer.
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/*
 * iucv_work_fn:
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/*
 * iucv_external_interrupt
 *
 * Handles external interrupts coming in from CP.
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
static void iucv_external_interrupt(struct ext_code ext_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	inc_irq_stat(IRQEXT_IUC);
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warn("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

struct iucv_interface iucv_if = {
	.message_receive = iucv_message_receive,
	.__message_receive = __iucv_message_receive,
	.message_reply = iucv_message_reply,
	.message_reject = iucv_message_reject,
	.message_send = iucv_message_send,
	.__message_send = __iucv_message_send,
	.message_send2way = iucv_message_send2way,
	.message_purge = iucv_message_purge,
	.path_accept = iucv_path_accept,
	.path_connect = iucv_path_connect,
	.path_quiesce = iucv_path_quiesce,
	.path_resume = iucv_path_resume,
	.path_sever = iucv_path_sever,
	.iucv_register = iucv_register,
	.iucv_unregister = iucv_unregister,
	.bus = NULL,
	.root = NULL,
};
EXPORT_SYMBOL(iucv_if);

static enum cpuhp_state iucv_online;
/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 */
static int __init iucv_init(void)
{
	int rc;

	if (!machine_is_vm()) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	system_ctl_set_bit(0, CR0_IUCV_BIT);
	rc = iucv_query_maxconn();
	if (rc)
		goto out_ctl;
	rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
	if (rc)
		goto out_ctl;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
			       iucv_cpu_prepare, iucv_cpu_dead);
	if (rc)
		goto out_dev;
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
			       iucv_cpu_online, iucv_cpu_down_prep);
	if (rc < 0)
		goto out_prep;
	iucv_online = rc;

	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_remove_hp;
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	iucv_if.root = iucv_root;
	iucv_if.bus = &iucv_bus;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_remove_hp:
	cpuhp_remove_state(iucv_online);
out_prep:
	cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
out_dev:
	root_device_unregister(iucv_root);
out_int:
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
	system_ctl_clear_bit(0, 1);
out:
	return rc;
}

/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;

	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);

	cpuhp_remove_state_nocalls(iucv_online);
	cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert <[email protected]>");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");