/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Vector Instructions
*
* Assembler macros to generate .byte/.word code for particular
 * vector instructions that are only supported by recent binutils (>= 2.26).
*
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#ifndef __ASM_S390_VX_INSN_INTERNAL_H
#define __ASM_S390_VX_INSN_INTERNAL_H
#ifndef __ASM_S390_VX_INSN_H
#error only <asm/vx-insn.h> can be included directly
#endif
#ifdef __ASSEMBLY__
/* Macros to generate vector instruction byte code */
/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd: Operand to store the register number
 * @gr: String designating the register in the format "%rN"
 */
.macro GR_NUM opd gr
\opd = 255
.ifc \gr,%r0
\opd = 0
.endif
.ifc \gr,%r1
\opd = 1
.endif
.ifc \gr,%r2
\opd = 2
.endif
.ifc \gr,%r3
\opd = 3
.endif
.ifc \gr,%r4
\opd = 4
.endif
.ifc \gr,%r5
\opd = 5
.endif
.ifc \gr,%r6
\opd = 6
.endif
.ifc \gr,%r7
\opd = 7
.endif
.ifc \gr,%r8
\opd = 8
.endif
.ifc \gr,%r9
\opd = 9
.endif
.ifc \gr,%r10
\opd = 10
.endif
.ifc \gr,%r11
\opd = 11
.endif
.ifc \gr,%r12
\opd = 12
.endif
.ifc \gr,%r13
\opd = 13
.endif
.ifc \gr,%r14
\opd = 14
.endif
.ifc \gr,%r15
\opd = 15
.endif
.if \opd == 255
\opd = \gr
.endif
.endm
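/* Example (illustrative only, not assembled): expanding
 *	GR_NUM	b2, %r11
 * sets the assembler symbol b2 to 11. A designation that matches no
 * "%rN" string falls through to the final .if, so a plain register
 * number such as GR_NUM b2, 11 yields the same result.
 */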
/* VX_NUM - Retrieve vector register number
 *
 * @opd: Operand to store the register number
 * @vxr: String designating the register in the format "%vN"
 *
 * The vector register number is used as an input to the instruction
 * and to compute the RXB field of the instruction.
 */
.macro VX_NUM opd vxr
\opd = 255
.ifc \vxr,%v0
\opd = 0
.endif
.ifc \vxr,%v1
\opd = 1
.endif
.ifc \vxr,%v2
\opd = 2
.endif
.ifc \vxr,%v3
\opd = 3
.endif
.ifc \vxr,%v4
\opd = 4
.endif
.ifc \vxr,%v5
\opd = 5
.endif
.ifc \vxr,%v6
\opd = 6
.endif
.ifc \vxr,%v7
\opd = 7
.endif
.ifc \vxr,%v8
\opd = 8
.endif
.ifc \vxr,%v9
\opd = 9
.endif
.ifc \vxr,%v10
\opd = 10
.endif
.ifc \vxr,%v11
\opd = 11
.endif
.ifc \vxr,%v12
\opd = 12
.endif
.ifc \vxr,%v13
\opd = 13
.endif
.ifc \vxr,%v14
\opd = 14
.endif
.ifc \vxr,%v15
\opd = 15
.endif
.ifc \vxr,%v16
\opd = 16
.endif
.ifc \vxr,%v17
\opd = 17
.endif
.ifc \vxr,%v18
\opd = 18
.endif
.ifc \vxr,%v19
\opd = 19
.endif
.ifc \vxr,%v20
\opd = 20
.endif
.ifc \vxr,%v21
\opd = 21
.endif
.ifc \vxr,%v22
\opd = 22
.endif
.ifc \vxr,%v23
\opd = 23
.endif
.ifc \vxr,%v24
\opd = 24
.endif
.ifc \vxr,%v25
\opd = 25
.endif
.ifc \vxr,%v26
\opd = 26
.endif
.ifc \vxr,%v27
\opd = 27
.endif
.ifc \vxr,%v28
\opd = 28
.endif
.ifc \vxr,%v29
\opd = 29
.endif
.ifc \vxr,%v30
\opd = 30
.endif
.ifc \vxr,%v31
\opd = 31
.endif
.if \opd == 255
\opd = \vxr
.endif
.endm
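/* Example (illustrative only, not assembled): expanding
 *	VX_NUM	v1, %v26
 * sets v1 to 26. The low four bits (v1&15) go into the four-bit
 * register field of the instruction; the fifth bit (v1&0x10) is
 * contributed via the RXB field, which is how registers %v16-%v31
 * are addressed through four-bit register fields.
 */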
/* RXB - Compute the RXB field from the most significant bits of the
 * vector register operands
 *
 * @rxb: Operand to store the computed RXB value
 * @v1: First vector register designated operand
 * @v2: Second vector register designated operand (optional)
 * @v3: Third vector register designated operand (optional)
 * @v4: Fourth vector register designated operand (optional)
 */
.macro RXB rxb v1 v2=0 v3=0 v4=0
\rxb = 0
.if \v1 & 0x10
\rxb = \rxb | 0x08
.endif
.if \v2 & 0x10
\rxb = \rxb | 0x04
.endif
.if \v3 & 0x10
\rxb = \rxb | 0x02
.endif
.if \v4 & 0x10
\rxb = \rxb | 0x01
.endif
.endm
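/* Worked example (illustrative only, not assembled):
 *	RXB	rxb, 16, 2, 31
 * sets rxb = 0x0A: operand one (16) contributes 0x08 and operand
 * three (31) contributes 0x02, since both have bit 0x10 set, while
 * operand two (2) and the defaulted fourth operand contribute
 * nothing.
 */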
/* MRXB - Generate Element Size Control and RXB value
*
* @m: Element size control
* @v1: First vector register designated operand (for RXB)
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
*/
.macro MRXB m v1 v2=0 v3=0 v4=0
rxb = 0
RXB rxb, \v1, \v2, \v3, \v4
.byte (\m << 4) | rxb
.endm
/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
*
* @m: Element size control
* @opc: Opcode
* @v1: First vector register designated operand (for RXB)
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
*/
.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
MRXB \m, \v1, \v2, \v3, \v4
.byte \opc
.endm
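/* Example (illustrative only): MRXBOPC emits the last two bytes of a
 * six-byte E7xx instruction. For m=3 and rxb=0x08 the first byte is
 * (3 << 4) | 0x08 = 0x38 (element size control in bits 32-35, RXB in
 * bits 36-39), followed by the one-byte opcode extension in bits
 * 40-47.
 */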
/* Vector support instructions */
/* VECTOR GENERATE BYTE MASK */
.macro VGBM vr imm2
VX_NUM v1, \vr
.word (0xE700 | ((v1&15) << 4))
.word \imm2
MRXBOPC 0, 0x44, v1
.endm
.macro VZERO vxr
VGBM \vxr, 0
.endm
.macro VONE vxr
VGBM \vxr, 0xFFFF
.endm
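/* Example (illustrative only, not assembled): expanding
 *	VZERO	%v24
 * generates VGBM %v24, 0, which emits the bytes
 *	0xE7 0x80 0x00 0x00 0x08 0x44
 * The RXB byte is 0x08 because register number 24 has bit 0x10 set.
 */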
/* VECTOR LOAD VR ELEMENT FROM GR */
.macro VLVG v, gr, disp, m
VX_NUM v1, \v
GR_NUM b2, "%r0"
GR_NUM r3, \gr
.word 0xE700 | ((v1&15) << 4) | r3
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x22, v1
.endm
.macro VLVGB v, gr, index
VLVG \v, \gr, \index, 0
.endm
.macro VLVGH v, gr, index
VLVG \v, \gr, \index, 1
.endm
.macro VLVGF v, gr, index
VLVG \v, \gr, \index, 2
.endm
.macro VLVGG v, gr, index
VLVG \v, \gr, \index, 3
.endm
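/* Example (illustrative only, not assembled):
 *	VLVGG	%v16, %r2, 1
 * loads general register %r2 into doubleword element 1 of %v16,
 * emitting 0xE7 0x02 0x00 0x01 0x38 0x22. The base register is
 * hardcoded to %r0, so the displacement serves directly as the
 * element index.
 */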
/* VECTOR LOAD REGISTER */
.macro VLR v1, v2
VX_NUM v1, \v1
VX_NUM v2, \v2
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word 0
MRXBOPC 0, 0x56, v1, v2
.endm
/* VECTOR LOAD */
.macro VL v, disp, index="%r0", base
VX_NUM v1, \v
GR_NUM x2, \index
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x06, v1
.endm
/* VECTOR LOAD ELEMENT */
.macro VLEx vr1, disp, index="%r0", base, m3, opc
VX_NUM v1, \vr1
GR_NUM x2, \index
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC \m3, \opc, v1
.endm
.macro VLEB vr1, disp, index="%r0", base, m3
VLEx \vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro VLEH vr1, disp, index="%r0", base, m3
VLEx \vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro VLEF vr1, disp, index="%r0", base, m3
VLEx \vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro VLEG vr1, disp, index="%r0", base, m3
VLEx \vr1, \disp, \index, \base, \m3, 0x02
.endm
/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro VLEIx vr1, imm2, m3, opc
VX_NUM v1, \vr1
.word 0xE700 | ((v1&15) << 4)
.word \imm2
MRXBOPC \m3, \opc, v1
.endm
.macro VLEIB vr1, imm2, index
VLEIx \vr1, \imm2, \index, 0x40
.endm
.macro VLEIH vr1, imm2, index
VLEIx \vr1, \imm2, \index, 0x41
.endm
.macro VLEIF vr1, imm2, index
VLEIx \vr1, \imm2, \index, 0x43
.endm
.macro VLEIG vr1, imm2, index
VLEIx \vr1, \imm2, \index, 0x42
.endm
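/* Example (illustrative only, not assembled):
 *	VLEIH	%v5, 0x1234, 3
 * places the 16-bit immediate 0x1234 into halfword element 3 of %v5,
 * emitting 0xE7 0x50 0x12 0x34 0x30 0x41.
 */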
/* VECTOR LOAD GR FROM VR ELEMENT */
.macro VLGV gr, vr, disp, base="%r0", m
GR_NUM r1, \gr
GR_NUM b2, \base
VX_NUM v3, \vr
.word 0xE700 | (r1 << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x21, v3
.endm
.macro VLGVB gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 0
.endm
.macro VLGVH gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 1
.endm
.macro VLGVF gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 2
.endm
.macro VLGVG gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 3
.endm
/* VECTOR LOAD MULTIPLE */
.macro VLM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \hint, 0x36, v1, v3
.endm
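/* Example (illustrative only, not assembled):
 *	VLM	%v24, %v31, 0, %r15
 * loads the eight registers %v24-%v31 from the 128 bytes at 0(%r15),
 * emitting 0xE7 0x8F 0xF0 0x00 0x3C 0x36. Together with VSTM this is
 * the typical way to save and restore a vector register range.
 */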
/* VECTOR STORE */
.macro VST vr1, disp, index="%r0", base
VX_NUM v1, \vr1
GR_NUM x2, \index
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (x2&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x0E, v1
.endm
/* VECTOR STORE MULTIPLE */
.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \hint, 0x3E, v1, v3
.endm
/* VECTOR PERMUTE */
.macro VPERM vr1, vr2, vr3, vr4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
.endm
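/* Example (illustrative only, not assembled):
 *	VPERM	%v0, %v1, %v2, %v3
 * selects bytes from the 32-byte concatenation of %v1 and %v2, as
 * indexed by the low five bits of each byte of %v3, and stores the
 * result in %v0; this emits 0xE7 0x01 0x20 0x00 0x30 0x8C.
 */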
/* VECTOR UNPACK LOGICAL LOW */
.macro VUPLL vr1, vr2, m3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word 0x0000
MRXBOPC \m3, 0xD4, v1, v2
.endm
.macro VUPLLB vr1, vr2
VUPLL \vr1, \vr2, 0
.endm
.macro VUPLLH vr1, vr2
VUPLL \vr1, \vr2, 1
.endm
.macro VUPLLF vr1, vr2
VUPLL \vr1, \vr2, 2
.endm
/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
.macro VPDI vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0x84, v1, v2, v3
.endm
/* VECTOR REPLICATE */
.macro VREP vr1, vr3, imm2, m4
VX_NUM v1, \vr1
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word \imm2
MRXBOPC \m4, 0x4D, v1, v3
.endm
.macro VREPB vr1, vr3, imm2
VREP \vr1, \vr3, \imm2, 0
.endm
.macro VREPH vr1, vr3, imm2
VREP \vr1, \vr3, \imm2, 1
.endm
.macro VREPF vr1, vr3, imm2
VREP \vr1, \vr3, \imm2, 2
.endm
.macro VREPG vr1, vr3, imm2
VREP \vr1, \vr3, \imm2, 3
.endm
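/* Example (illustrative only, not assembled):
 *	VREPG	%v1, %v2, 0
 * replicates doubleword element 0 of %v2 into both doubleword
 * elements of %v1, emitting 0xE7 0x12 0x00 0x00 0x30 0x4D.
 */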
/* VECTOR MERGE HIGH */
.macro VMRH vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0x61, v1, v2, v3
.endm
.macro VMRHB vr1, vr2, vr3
VMRH \vr1, \vr2, \vr3, 0
.endm
.macro VMRHH vr1, vr2, vr3
VMRH \vr1, \vr2, \vr3, 1
.endm
.macro VMRHF vr1, vr2, vr3
VMRH \vr1, \vr2, \vr3, 2
.endm
.macro VMRHG vr1, vr2, vr3
VMRH \vr1, \vr2, \vr3, 3
.endm
/* VECTOR MERGE LOW */
.macro VMRL vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0x60, v1, v2, v3
.endm
.macro VMRLB vr1, vr2, vr3
VMRL \vr1, \vr2, \vr3, 0
.endm
.macro VMRLH vr1, vr2, vr3
VMRL \vr1, \vr2, \vr3, 1
.endm
.macro VMRLF vr1, vr2, vr3
VMRL \vr1, \vr2, \vr3, 2
.endm
.macro VMRLG vr1, vr2, vr3
VMRL \vr1, \vr2, \vr3, 3
.endm
/* Vector integer instructions */
/* VECTOR AND */
.macro VN vr1, vr2, vr3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC 0, 0x68, v1, v2, v3
.endm
/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC 0, 0x6D, v1, v2, v3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro VGFM vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0xB4, v1, v2, v3
.endm
.macro VGFMB vr1, vr2, vr3
VGFM \vr1, \vr2, \vr3, 0
.endm
.macro VGFMH vr1, vr2, vr3
VGFM \vr1, \vr2, \vr3, 1
.endm
.macro VGFMF vr1, vr2, vr3
VGFM \vr1, \vr2, \vr3, 2
.endm
.macro VGFMG vr1, vr2, vr3
VGFM \vr1, \vr2, \vr3, 3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro VGFMA vr1, vr2, vr3, vr4, m5
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12) | (\m5 << 8)
MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro VGFMAB vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 0
.endm
.macro VGFMAH vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 1
.endm
.macro VGFMAF vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 2
.endm
.macro VGFMAG vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 3
.endm
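/* Usage note (illustrative only, not assembled): VGFMG performs a
 * carry-less multiplication of the corresponding doubleword elements
 * and sums the two 128-bit products with exclusive OR. For example
 *	VGFMG	%v2, %v0, %v1
 * emits 0xE7 0x20 0x10 0x00 0x30 0xB4. The accumulate forms take a
 * fourth operand that is additionally XORed into the result.
 */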
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro VSRLB vr1, vr2, vr3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC 0, 0x7D, v1, v2, v3
.endm
/* VECTOR REPLICATE IMMEDIATE */
.macro VREPI vr1, imm2, m3
VX_NUM v1, \vr1
.word 0xE700 | ((v1&15) << 4)
.word \imm2
MRXBOPC \m3, 0x45, v1
.endm
.macro VREPIB vr1, imm2
VREPI \vr1, \imm2, 0
.endm
.macro VREPIH vr1, imm2
VREPI \vr1, \imm2, 1
.endm
.macro VREPIF vr1, imm2
VREPI \vr1, \imm2, 2
.endm
.macro VREPIG vr1, imm2
VREPI \vr1, \imm2, 3
.endm
/* VECTOR ADD */
.macro VA vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0xF3, v1, v2, v3
.endm
.macro VAB vr1, vr2, vr3
VA \vr1, \vr2, \vr3, 0
.endm
.macro VAH vr1, vr2, vr3
VA \vr1, \vr2, \vr3, 1
.endm
.macro VAF vr1, vr2, vr3
VA \vr1, \vr2, \vr3, 2
.endm
.macro VAG vr1, vr2, vr3
VA \vr1, \vr2, \vr3, 3
.endm
.macro VAQ vr1, vr2, vr3
VA \vr1, \vr2, \vr3, 4
.endm
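/* Example (illustrative only, not assembled):
 *	VAQ	%v4, %v5, %v6
 * adds %v5 and %v6 as single 128-bit integers and stores the sum in
 * %v4, emitting 0xE7 0x45 0x60 0x00 0x40 0xF3.
 */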
/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro VESRAV vr1, vr2, vr3, m4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12)
MRXBOPC \m4, 0x7A, v1, v2, v3
.endm
.macro VESRAVB vr1, vr2, vr3
VESRAV \vr1, \vr2, \vr3, 0
.endm
.macro VESRAVH vr1, vr2, vr3
VESRAV \vr1, \vr2, \vr3, 1
.endm
.macro VESRAVF vr1, vr2, vr3
VESRAV \vr1, \vr2, \vr3, 2
.endm
.macro VESRAVG vr1, vr2, vr3
VESRAV \vr1, \vr2, \vr3, 3
.endm
/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
.macro VERLL vr1, vr3, disp, base="%r0", m4
VX_NUM v1, \vr1
VX_NUM v3, \vr3
GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \m4, 0x33, v1, v3
.endm
.macro VERLLB vr1, vr3, disp, base="%r0"
VERLL \vr1, \vr3, \disp, \base, 0
.endm
.macro VERLLH vr1, vr3, disp, base="%r0"
VERLL \vr1, \vr3, \disp, \base, 1
.endm
.macro VERLLF vr1, vr3, disp, base="%r0"
VERLL \vr1, \vr3, \disp, \base, 2
.endm
.macro VERLLG vr1, vr3, disp, base="%r0"
VERLL \vr1, \vr3, \disp, \base, 3
.endm
/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
.macro VSLDB vr1, vr2, vr3, imm4
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
.word 0xE700 | ((v1&15) << 4) | (v2&15)
.word ((v3&15) << 12) | (\imm4)
MRXBOPC 0, 0x77, v1, v2, v3
.endm
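/* Example (illustrative only, not assembled):
 *	VSLDB	%v1, %v2, %v3, 4
 * concatenates %v2 and %v3, shifts the 32-byte value left by four
 * bytes, and stores the leftmost 16 bytes in %v1, emitting
 * 0xE7 0x12 0x30 0x04 0x00 0x77.
 */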
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_INTERNAL_H */