+2011-05-24 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * config/tc-s390.c (md_gather_operands): Emit an error for odd
+ numbered registers used as register pair operand.
+
2011-05-23 Nick Clifton <nickc@redhat.com>
* config/tc-v850.h (TC_FX_SIZE_SLACK): Define.
&& ex.X_add_number == 0
&& warn_areg_zero)
as_warn (_("base register specified but zero"));
+ if ((operand->flags & S390_OPERAND_REG_EVEN)
+ && (ex.X_add_number & 1))
+ as_fatal (_("odd numbered register specified as register pair"));
s390_insert_operand (insn, operand, ex.X_add_number, NULL, 0);
}
}
+2011-05-24 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * gas/s390/esa-g5.d: Fix register pair operands.
+ * gas/s390/esa-g5.s: Likewise.
+ * gas/s390/esa-z9-109.d: Likewise.
+ * gas/s390/esa-z9-109.s: Likewise.
+ * gas/s390/zarch-z196.d: Likewise.
+ * gas/s390/zarch-z196.s: Likewise.
+ * gas/s390/zarch-z9-109.d: Likewise.
+ * gas/s390/zarch-z9-109.s: Likewise.
+ * gas/s390/zarch-z900.d: Likewise.
+ * gas/s390/zarch-z900.s: Likewise.
+ * gas/s390/zarch-z990.d: Likewise.
+ * gas/s390/zarch-z990.s: Likewise.
+
2011-05-20 Bernd Schmidt <bernds@codesourcery.com>
* gas/tic6x/pcr-relocs.d: New test.
.*: 3e 69 [ ]*aur %f6,%f9
.*: 6e 65 af ff [ ]*aw %f6,4095\(%r5,%r10\)
.*: 2e 69 [ ]*awr %f6,%f9
-.*: b3 4a 00 69 [ ]*axbr %f6,%f9
-.*: 36 69 [ ]*axr %f6,%f9
+.*: b3 4a 00 68 [ ]*axbr %f6,%f8
+.*: 36 68 [ ]*axr %f6,%f8
.*: 47 f5 af ff [ ]*b 4095\(%r5,%r10\)
.*: b2 40 00 69 [ ]*bakr %r6,%r9
.*: 45 65 af ff [ ]*bal %r6,4095\(%r5,%r10\)
.*: b3 95 00 69 [ ]*cdfbr %f6,%r9
.*: b3 b5 00 69 [ ]*cdfr %f6,%r9
.*: 29 69 [ ]*cdr %f6,%f9
-.*: bb 69 5f ff [ ]*cds %r6,%r9,4095\(%r5\)
+.*: bb 68 5f ff [ ]*cds %r6,%r8,4095\(%r5\)
.*: 79 65 af ff [ ]*ce %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 09 [ ]*ceb %f6,4095\(%r5,%r10\)
.*: b3 09 00 69 [ ]*cebr %f6,%f9
.*: b2 1a 5f ff [ ]*cfc 4095\(%r5\)
.*: b3 99 50 69 [ ]*cfdbr %r6,5,%f9
.*: b3 98 50 69 [ ]*cfebr %r6,5,%f9
-.*: b3 9a 50 69 [ ]*cfxbr %r6,5,%f9
+.*: b3 9a 50 58 [ ]*cfxbr %r5,5,%f8
.*: b3 b9 90 65 [ ]*cfdr %r6,9,%f5
.*: b3 b8 90 65 [ ]*cfer %r6,9,%f5
-.*: b3 ba 90 65 [ ]*cfxr %r6,9,%f5
+.*: b3 ba 90 56 [ ]*cfxr %r5,9,%f6
.*: 49 65 af ff [ ]*ch %r6,4095\(%r5,%r10\)
.*: a7 6e 80 01 [ ]*chi %r6,-32767
.*: b2 41 00 69 [ ]*cksm %r6,%r9
.*: ba 69 5f ff [ ]*cs %r6,%r9,4095\(%r5\)
.*: b2 30 00 00 [ ]*csch
.*: b2 50 00 69 [ ]*csp %r6,%r9
-.*: b2 57 00 69 [ ]*cuse %r6,%r9
-.*: b2 a7 00 69 [ ]*cutfu %r6,%r9
-.*: b2 a6 00 69 [ ]*cuutf %r6,%r9
+.*: b2 57 00 68 [ ]*cuse %r6,%r8
+.*: b2 a7 00 68 [ ]*cutfu %r6,%r8
+.*: b2 a6 00 68 [ ]*cuutf %r6,%r8
.*: 4f 65 af ff [ ]*cvb %r6,4095\(%r5,%r10\)
.*: 4e 65 af ff [ ]*cvd %r6,4095\(%r5,%r10\)
-.*: b3 49 00 69 [ ]*cxbr %f6,%f9
+.*: b3 49 00 68 [ ]*cxbr %f6,%f8
.*: b3 96 00 69 [ ]*cxfbr %f6,%r9
.*: b3 b6 00 69 [ ]*cxfr %f6,%r9
.*: b3 69 00 69 [ ]*cxr %f6,%f9
.*: b3 53 9a 65 [ ]*diebr %f6,%f9,%f5,10
.*: fd 58 5f ff af ff [ ]*dp 4095\(6,%r5\),4095\(9,%r10\)
.*: 1d 69 [ ]*dr %r6,%r9
-.*: b3 4d 00 69 [ ]*dxbr %f6,%f9
-.*: b2 2d 00 69 [ ]*dxr %f6,%f9
+.*: b3 4d 00 68 [ ]*dxbr %f6,%f8
+.*: b2 2d 00 68 [ ]*dxr %f6,%f8
.*: b2 4f 00 69 [ ]*ear %r6,%a9
.*: de ff 5f ff af ff [ ]*ed 4095\(256,%r5\),4095\(%r10\)
.*: df ff 5f ff af ff [ ]*edmk 4095\(256,%r5\),4095\(%r10\)
.*: b3 7f 00 69 [ ]*fidr %f6,%f9
.*: b3 57 50 69 [ ]*fiebr %f6,5,%f9
.*: b3 77 00 69 [ ]*fier %f6,%f9
-.*: b3 47 50 69 [ ]*fixbr %f6,5,%f9
-.*: b3 67 00 69 [ ]*fixr %f6,%f9
+.*: b3 47 50 68 [ ]*fixbr %f6,5,%f8
+.*: b3 67 00 68 [ ]*fixr %f6,%f8
.*: 24 69 [ ]*hdr %f6,%f9
.*: 34 69 [ ]*her %f6,%f9
.*: b2 31 00 00 [ ]*hsch
.*: 33 69 [ ]*lcer %f6,%f9
.*: 13 69 [ ]*lcr %r6,%r9
.*: b7 69 5f ff [ ]*lctl %c6,%c9,4095\(%r5\)
-.*: b3 43 00 69 [ ]*lcxbr %f6,%f9
-.*: b3 63 00 69 [ ]*lcxr %f6,%f9
+.*: b3 43 00 68 [ ]*lcxbr %f6,%f8
+.*: b3 63 00 68 [ ]*lcxr %f6,%f8
.*: 68 65 af ff [ ]*ld %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 24 [ ]*lde %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 04 [ ]*ldeb %f6,4095\(%r5,%r10\)
.*: b3 04 00 69 [ ]*ldebr %f6,%f9
.*: b3 24 00 69 [ ]*lder %f6,%f9
.*: 28 69 [ ]*ldr %f6,%f9
-.*: b3 45 00 69 [ ]*ldxbr %f6,%f9
-.*: 25 69 [ ]*ldxr %f6,%f9
+.*: b3 45 00 68 [ ]*ldxbr %f6,%f8
+.*: 25 68 [ ]*ldxr %f6,%f8
.*: 78 65 af ff [ ]*le %f6,4095\(%r5,%r10\)
.*: b3 44 00 69 [ ]*ledbr %f6,%f9
.*: 35 69 [ ]*ledr %f6,%f9
.*: 38 69 [ ]*ler %f6,%f9
-.*: b3 46 00 69 [ ]*lexbr %f6,%f9
-.*: b3 66 00 69 [ ]*lexr %f6,%f9
+.*: b3 46 00 68 [ ]*lexbr %f6,%f8
+.*: b3 66 00 68 [ ]*lexr %f6,%f8
.*: b2 9d 5f ff [ ]*lfpc 4095\(%r5\)
.*: 48 65 af ff [ ]*lh %r6,4095\(%r5,%r10\)
.*: a7 68 80 01 [ ]*lhi %r6,-32767
.*: b3 01 00 69 [ ]*lnebr %f6,%f9
.*: 31 69 [ ]*lner %f6,%f9
.*: 11 69 [ ]*lnr %r6,%r9
-.*: b3 41 00 69 [ ]*lnxbr %f6,%f9
-.*: b3 61 00 69 [ ]*lnxr %f6,%f9
+.*: b3 41 00 68 [ ]*lnxbr %f6,%f8
+.*: b3 61 00 68 [ ]*lnxr %f6,%f8
.*: b3 10 00 69 [ ]*lpdbr %f6,%f9
.*: 20 69 [ ]*lpdr %f6,%f9
.*: b3 00 00 69 [ ]*lpebr %f6,%f9
.*: 30 69 [ ]*lper %f6,%f9
.*: 10 69 [ ]*lpr %r6,%r9
.*: 82 00 5f ff [ ]*lpsw 4095\(%r5\)
-.*: b3 40 00 69 [ ]*lpxbr %f6,%f9
-.*: b3 60 00 69 [ ]*lpxr %f6,%f9
+.*: b3 40 00 68 [ ]*lpxbr %f6,%f8
+.*: b3 60 00 68 [ ]*lpxr %f6,%f8
.*: 18 69 [ ]*lr %r6,%r9
.*: b1 65 af ff [ ]*lra %r6,4095\(%r5,%r10\)
-.*: 25 69 [ ]*ldxr %f6,%f9
+.*: 25 78 [ ]*ldxr %f7,%f8
.*: 35 69 [ ]*ledr %f6,%f9
.*: b3 12 00 69 [ ]*ltdbr %f6,%f9
.*: 22 69 [ ]*ltdr %f6,%f9
.*: b3 02 00 69 [ ]*ltebr %f6,%f9
.*: 32 69 [ ]*lter %f6,%f9
.*: 12 69 [ ]*ltr %r6,%r9
-.*: b3 42 00 69 [ ]*ltxbr %f6,%f9
-.*: b3 62 00 69 [ ]*ltxr %f6,%f9
+.*: b3 42 00 68 [ ]*ltxbr %f6,%f8
+.*: b3 62 00 68 [ ]*ltxr %f6,%f8
.*: b2 4b 00 69 [ ]*lura %r6,%r9
.*: ed 65 af ff 00 25 [ ]*lxd %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 05 [ ]*lxdb %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 06 [ ]*lxeb %f6,4095\(%r5,%r10\)
.*: b3 06 00 69 [ ]*lxebr %f6,%f9
.*: b3 26 00 69 [ ]*lxer %f6,%f9
-.*: b3 65 00 69 [ ]*lxr %f6,%f9
+.*: b3 65 00 68 [ ]*lxr %f6,%f8
.*: b3 75 00 60 [ ]*lzdr %f6
.*: b3 74 00 60 [ ]*lzer %f6
.*: b3 76 00 60 [ ]*lzxr %f6
.*: e8 ff 5f ff af ff [ ]*mvcin 4095\(256,%r5\),4095\(%r10\)
.*: d9 69 5f ff af ff [ ]*mvck 4095\(%r6,%r5\),4095\(%r10\),%r9
.*: 0e 69 [ ]*mvcl %r6,%r9
-.*: a8 69 5f ff [ ]*mvcle %r6,%r9,4095\(%r5\)
-.*: eb 69 5f ff 00 8e [ ]*mvclu %r6,%r9,4095\(%r5\)
+.*: a8 68 5f ff [ ]*mvcle %r6,%r8,4095\(%r5\)
+.*: eb 68 5f ff 00 8e [ ]*mvclu %r6,%r8,4095\(%r5\)
.*: da 69 5f ff af ff [ ]*mvcp 4095\(%r6,%r5\),4095\(%r10\),%r9
.*: db 69 5f ff af ff [ ]*mvcs 4095\(%r6,%r5\),4095\(%r10\),%r9
.*: e5 0e 5f ff af ff [ ]*mvcsk 4095\(%r5\),4095\(%r10\)
.*: b2 54 00 69 [ ]*mvpg %r6,%r9
.*: b2 55 00 69 [ ]*mvst %r6,%r9
.*: d3 ff 5f ff af ff [ ]*mvz 4095\(256,%r5\),4095\(%r10\)
-.*: b3 4c 00 69 [ ]*mxbr %f6,%f9
+.*: b3 4c 00 68 [ ]*mxbr %f6,%f8
.*: 67 65 af ff [ ]*mxd %f6,4095\(%r5,%r10\)
.*: ed 65 af ff 00 07 [ ]*mxdb %f6,4095\(%r5,%r10\)
.*: b3 07 00 69 [ ]*mxdbr %f6,%f9
.*: 27 69 [ ]*mxdr %f6,%f9
-.*: 26 69 [ ]*mxr %f6,%f9
+.*: 26 68 [ ]*mxr %f6,%f8
.*: 54 65 af ff [ ]*n %r6,4095\(%r5,%r10\)
.*: d4 ff 5f ff af ff [ ]*nc 4095\(256,%r5\),4095\(%r10\)
.*: 94 ff 5f ff [ ]*ni 4095\(%r5\),255
.*: ed 65 af ff 00 14 [ ]*sqeb %f6,4095\(%r5,%r10\)
.*: b3 14 00 69 [ ]*sqebr %f6,%f9
.*: b2 45 00 69 [ ]*sqer %f6,%f9
-.*: b3 16 00 69 [ ]*sqxbr %f6,%f9
-.*: b3 36 00 69 [ ]*sqxr %f6,%f9
+.*: b3 16 00 68 [ ]*sqxbr %f6,%f8
+.*: b3 36 00 68 [ ]*sqxr %f6,%f8
.*: 1b 69 [ ]*sr %r6,%r9
.*: 8a 60 5f ff [ ]*sra %r6,4095\(%r5\)
.*: 8e 60 5f ff [ ]*srda %r6,4095\(%r5\)
.*: 0a ff [ ]*svc 255
.*: 6f 65 af ff [ ]*sw %f6,4095\(%r5,%r10\)
.*: 2f 69 [ ]*swr %f6,%f9
-.*: b3 4b 00 69 [ ]*sxbr %f6,%f9
-.*: 37 69 [ ]*sxr %f6,%f9
+.*: b3 4b 00 68 [ ]*sxbr %f6,%f8
+.*: 37 68 [ ]*sxr %f6,%f8
.*: b2 4c 00 69 [ ]*tar %a6,%r9
.*: b2 2c 00 06 [ ]*tb %r6
.*: b3 51 50 69 [ ]*tbdr %f6,5,%f9
.*: 01 ff [ ]*trap2
.*: b2 ff 5f ff [ ]*trap4 4095\(%r5\)
.*: b2 a5 00 69 [ ]*tre %r6,%r9
-.*: b9 93 00 69 [ ]*troo %r6,%r9
-.*: b9 92 00 69 [ ]*trot %r6,%r9
+.*: b9 93 00 68 [ ]*troo %r6,%r8
+.*: b9 92 00 68 [ ]*trot %r6,%r8
.*: dd ff 5f ff af ff [ ]*trt 4095\(256,%r5\),4095\(%r10\)
-.*: b9 91 00 69 [ ]*trto %r6,%r9
-.*: b9 90 00 69 [ ]*trtt %r6,%r9
+.*: b9 91 00 68 [ ]*trto %r6,%r8
+.*: b9 90 00 68 [ ]*trtt %r6,%r8
.*: 93 00 5f ff [ ]*ts 4095\(%r5\)
.*: b2 35 5f ff [ ]*tsch 4095\(%r5\)
.*: f3 58 5f ff af ff [ ]*unpk 4095\(6,%r5\),4095\(9,%r10\)
aur %f6,%f9
aw %f6,4095(%r5,%r10)
awr %f6,%f9
- axbr %f6,%f9
- axr %f6,%f9
+ axbr %f6,%f8
+ axr %f6,%f8
b 4095(%r5,%r10)
bakr %r6,%r9
bal %r6,4095(%r5,%r10)
cdfbr %f6,%r9
cdfr %f6,%r9
cdr %f6,%f9
- cds %r6,%r9,4095(%r5)
+ cds %r6,%r8,4095(%r5)
ce %f6,4095(%r5,%r10)
ceb %f6,4095(%r5,%r10)
cebr %f6,%f9
cfc 4095(%r5)
cfdbr %r6,5,%f9
cfebr %r6,5,%f9
- cfxbr %r6,5,%f9
+ cfxbr %r5,5,%f8
cfdr %r6,9,%f5
cfer %r6,9,%f5
- cfxr %r6,9,%f5
+ cfxr %r5,9,%f6
ch %r6,4095(%r5,%r10)
chi %r6,-32767
cksm %r6,%r9
cs %r6,%r9,4095(%r5)
csch
csp %r6,%r9
- cuse %r6,%r9
- cutfu %r6,%r9
- cuutf %r6,%r9
+ cuse %r6,%r8
+ cutfu %r6,%r8
+ cuutf %r6,%r8
cvb %r6,4095(%r5,%r10)
cvd %r6,4095(%r5,%r10)
- cxbr %f6,%f9
+ cxbr %f6,%f8
cxfbr %f6,%r9
cxfr %f6,%r9
cxr %f6,%f9
 diebr %f6,%f9,%f5,10
dp 4095(6,%r5),4095(9,%r10)
dr %r6,%r9
- dxbr %f6,%f9
- dxr %f6,%f9
+ dxbr %f6,%f8
+ dxr %f6,%f8
ear %r6,%a9
ed 4095(256,%r5),4095(%r10)
edmk 4095(256,%r5),4095(%r10)
fidr %f6,%f9
fiebr %f6,5,%f9
fier %f6,%f9
- fixbr %f6,5,%f9
- fixr %f6,%f9
+ fixbr %f6,5,%f8
+ fixr %f6,%f8
hdr %f6,%f9
her %f6,%f9
hsch
lcer %f6,%f9
lcr %r6,%r9
lctl %c6,%c9,4095(%r5)
- lcxbr %f6,%f9
- lcxr %f6,%f9
+ lcxbr %f6,%f8
+ lcxr %f6,%f8
ld %f6,4095(%r5,%r10)
lde %f6,4095(%r5,%r10)
ldeb %f6,4095(%r5,%r10)
ldebr %f6,%f9
lder %f6,%f9
ldr %f6,%f9
- ldxbr %f6,%f9
- ldxr %f6,%f9
+ ldxbr %f6,%f8
+ ldxr %f6,%f8
le %f6,4095(%r5,%r10)
ledbr %f6,%f9
ledr %f6,%f9
ler %f6,%f9
- lexbr %f6,%f9
- lexr %f6,%f9
+ lexbr %f6,%f8
+ lexr %f6,%f8
lfpc 4095(%r5)
lh %r6,4095(%r5,%r10)
lhi %r6,-32767
lnebr %f6,%f9
lner %f6,%f9
lnr %r6,%r9
- lnxbr %f6,%f9
- lnxr %f6,%f9
+ lnxbr %f6,%f8
+ lnxr %f6,%f8
lpdbr %f6,%f9
lpdr %f6,%f9
lpebr %f6,%f9
lper %f6,%f9
lpr %r6,%r9
lpsw 4095(%r5)
- lpxbr %f6,%f9
- lpxr %f6,%f9
+ lpxbr %f6,%f8
+ lpxr %f6,%f8
lr %r6,%r9
lra %r6,4095(%r5,%r10)
- lrdr %f6,%f9
+ lrdr %f7,%f8
lrer %f6,%f9
ltdbr %f6,%f9
ltdr %f6,%f9
ltebr %f6,%f9
lter %f6,%f9
ltr %r6,%r9
- ltxbr %f6,%f9
- ltxr %f6,%f9
+ ltxbr %f6,%f8
+ ltxr %f6,%f8
lura %r6,%r9
lxd %f6,4095(%r5,%r10)
lxdb %f6,4095(%r5,%r10)
lxeb %f6,4095(%r5,%r10)
lxebr %f6,%f9
lxer %f6,%f9
- lxr %f6,%f9
+ lxr %f6,%f8
lzdr %f6
lzer %f6
lzxr %f6
mvcin 4095(256,%r5),4095(%r10)
mvck 4095(%r6,%r5),4095(%r10),%r9
mvcl %r6,%r9
- mvcle %r6,%r9,4095(%r5)
- mvclu %r6,%r9,4095(%r5)
+ mvcle %r6,%r8,4095(%r5)
+ mvclu %r6,%r8,4095(%r5)
mvcp 4095(%r6,%r5),4095(%r10),%r9
mvcs 4095(%r6,%r5),4095(%r10),%r9
mvcsk 4095(%r5),4095(%r10)
mvpg %r6,%r9
mvst %r6,%r9
mvz 4095(256,%r5),4095(%r10)
- mxbr %f6,%f9
+ mxbr %f6,%f8
mxd %f6,4095(%r5,%r10)
mxdb %f6,4095(%r5,%r10)
mxdbr %f6,%f9
mxdr %f6,%f9
- mxr %f6,%f9
+ mxr %f6,%f8
n %r6,4095(%r5,%r10)
nc 4095(256,%r5),4095(%r10)
ni 4095(%r5),255
sqeb %f6,4095(%r5,%r10)
sqebr %f6,%f9
sqer %f6,%f9
- sqxbr %f6,%f9
- sqxr %f6,%f9
+ sqxbr %f6,%f8
+ sqxr %f6,%f8
sr %r6,%r9
sra %r6,4095(%r5)
srda %r6,4095(%r5)
svc 255
sw %f6,4095(%r5,%r10)
swr %f6,%f9
- sxbr %f6,%f9
- sxr %f6,%f9
+ sxbr %f6,%f8
+ sxr %f6,%f8
tar %a6,%r9
tb %r6
 tbdr %f6,5,%f9
trap2
trap4 4095(%r5)
tre %r6,%r9
- troo %r6,%r9
- trot %r6,%r9
+ troo %r6,%r8
+ trot %r6,%r8
trt 4095(256,%r5),4095(%r10)
- trto %r6,%r9
- trtt %r6,%r9
+ trto %r6,%r8
+ trtt %r6,%r8
ts 4095(%r5)
tsch 4095(%r5)
unpk 4095(6,%r5),4095(9,%r10)
Disassembly of section .text:
.* <foo>:
-.*: b9 93 f0 69 [ ]*troo %r6,%r9,15
-.*: b9 92 f0 69 [ ]*trot %r6,%r9,15
-.*: b9 91 f0 69 [ ]*trto %r6,%r9,15
-.*: b9 90 f0 69 [ ]*trtt %r6,%r9,15
+.*: b9 93 f0 68 [ ]*troo %r6,%r8,15
+.*: b9 92 f0 68 [ ]*trot %r6,%r8,15
+.*: b9 91 f0 68 [ ]*trto %r6,%r8,15
+.*: b9 90 f0 68 [ ]*trtt %r6,%r8,15
.*: b2 2b 00 69 [ ]*sske %r6,%r9
.text
foo:
- troo %r6,%r9,15
- trot %r6,%r9,15
- trto %r6,%r9,15
- trtt %r6,%r9,15
+ troo %r6,%r8,15
+ trot %r6,%r8,15
+ trto %r6,%r8,15
+ trtt %r6,%r8,15
# z9-109 z/Architecture mode extended sske with an additional parameter
# make sure the old version still works for esa
sske %r6,%r9
.*: b9 ae 00 67 [ ]*rrbm %r6,%r7
.*: b3 94 37 59 [ ]*cefbra %f5,3,%r9,7
.*: b3 95 37 59 [ ]*cdfbra %f5,3,%r9,7
-.*: b3 96 37 59 [ ]*cxfbra %f5,3,%r9,7
+.*: b3 96 37 69 [ ]*cxfbra %f6,3,%r9,7
.*: b3 a4 37 59 [ ]*cegbra %f5,3,%r9,7
.*: b3 a5 37 59 [ ]*cdgbra %f5,3,%r9,7
-.*: b3 a6 37 59 [ ]*cxgbra %f5,3,%r9,7
+.*: b3 a6 37 69 [ ]*cxgbra %f6,3,%r9,7
.*: b3 90 37 59 [ ]*celfbr %f5,3,%r9,7
.*: b3 91 37 59 [ ]*cdlfbr %f5,3,%r9,7
-.*: b3 92 37 59 [ ]*cxlfbr %f5,3,%r9,7
+.*: b3 92 37 69 [ ]*cxlfbr %f6,3,%r9,7
.*: b3 a0 37 59 [ ]*celgbr %f5,3,%r9,7
.*: b3 a1 37 59 [ ]*cdlgbr %f5,3,%r9,7
-.*: b3 a2 37 59 [ ]*cxlgbr %f5,3,%r9,7
+.*: b3 a2 37 69 [ ]*cxlgbr %f6,3,%r9,7
.*: b3 98 37 59 [ ]*cfebra %r5,3,%f9,7
.*: b3 99 37 59 [ ]*cfdbra %r5,3,%f9,7
-.*: b3 9a 37 59 [ ]*cfxbra %r5,3,%f9,7
+.*: b3 9a 37 58 [ ]*cfxbra %r5,3,%f8,7
.*: b3 a8 37 59 [ ]*cgebra %r5,3,%f9,7
.*: b3 a9 37 59 [ ]*cgdbra %r5,3,%f9,7
-.*: b3 aa 37 59 [ ]*cgxbra %r5,3,%f9,7
+.*: b3 aa 37 58 [ ]*cgxbra %r5,3,%f8,7
.*: b3 9c 37 59 [ ]*clfebr %r5,3,%f9,7
.*: b3 9d 37 59 [ ]*clfdbr %r5,3,%f9,7
-.*: b3 9e 37 59 [ ]*clfxbr %r5,3,%f9,7
+.*: b3 9e 37 58 [ ]*clfxbr %r5,3,%f8,7
.*: b3 ac 37 59 [ ]*clgebr %r5,3,%f9,7
.*: b3 ad 37 59 [ ]*clgdbr %r5,3,%f9,7
-.*: b3 ae 37 59 [ ]*clgxbr %r5,3,%f9,7
+.*: b3 ae 37 58 [ ]*clgxbr %r5,3,%f8,7
.*: b3 57 37 59 [ ]*fiebra %f5,3,%f9,7
.*: b3 5f 37 59 [ ]*fidbra %f5,3,%f9,7
-.*: b3 47 37 59 [ ]*fixbra %f5,3,%f9,7
+.*: b3 47 37 68 [ ]*fixbra %f6,3,%f8,7
.*: b3 44 37 59 [ ]*ledbra %f5,3,%f9,7
-.*: b3 45 37 59 [ ]*ldxbra %f5,3,%f9,7
-.*: b3 46 37 59 [ ]*lexbra %f5,3,%f9,7
+.*: b3 45 37 68 [ ]*ldxbra %f6,3,%f8,7
+.*: b3 46 37 68 [ ]*lexbra %f6,3,%f8,7
.*: b3 d2 97 35 [ ]*adtra %f3,%f5,%f9,7
-.*: b3 da 97 35 [ ]*axtra %f3,%f5,%f9,7
+.*: b3 da 67 24 [ ]*axtra %f2,%f4,%f6,7
.*: b3 f1 37 59 [ ]*cdgtra %f5,3,%r9,7
.*: b9 51 37 59 [ ]*cdftr %f5,3,%r9,7
-.*: b9 59 37 59 [ ]*cxftr %f5,3,%r9,7
-.*: b3 f9 37 59 [ ]*cxgtra %f5,3,%r9,7
+.*: b9 59 37 69 [ ]*cxftr %f6,3,%r9,7
+.*: b3 f9 37 69 [ ]*cxgtra %f6,3,%r9,7
.*: b9 52 37 59 [ ]*cdlgtr %f5,3,%r9,7
-.*: b9 5a 37 59 [ ]*cxlgtr %f5,3,%r9,7
+.*: b9 5a 37 69 [ ]*cxlgtr %f6,3,%r9,7
.*: b9 53 37 59 [ ]*cdlftr %f5,3,%r9,7
.*: b9 5b 37 59 [ ]*cxlftr %f5,3,%r9,7
.*: b3 e1 37 59 [ ]*cgdtra %r5,3,%f9,7
-.*: b3 e9 37 59 [ ]*cgxtra %r5,3,%f9,7
+.*: b3 e9 37 58 [ ]*cgxtra %r5,3,%f8,7
.*: b9 41 37 59 [ ]*cfdtr %r5,3,%f9,7
.*: b9 49 37 59 [ ]*cfxtr %r5,3,%f9,7
.*: b9 42 37 59 [ ]*clgdtr %r5,3,%f9,7
-.*: b9 4a 37 59 [ ]*clgxtr %r5,3,%f9,7
+.*: b9 4a 37 58 [ ]*clgxtr %r5,3,%f8,7
.*: b9 43 37 59 [ ]*clfdtr %r5,3,%f9,7
-.*: b9 4b 37 59 [ ]*clfxtr %r5,3,%f9,7
+.*: b9 4b 37 58 [ ]*clfxtr %r5,3,%f8,7
.*: b3 d1 97 35 [ ]*ddtra %f3,%f5,%f9,7
-.*: b3 d9 97 35 [ ]*dxtra %f3,%f5,%f9,7
+.*: b3 d9 67 24 [ ]*dxtra %f2,%f4,%f6,7
.*: b3 d0 97 35 [ ]*mdtra %f3,%f5,%f9,7
-.*: b3 d8 97 35 [ ]*mxtra %f3,%f5,%f9,7
+.*: b3 d8 67 24 [ ]*mxtra %f2,%f4,%f6,7
.*: b3 d3 97 35 [ ]*sdtra %f3,%f5,%f9,7
-.*: b3 db 97 35 [ ]*sxtra %f3,%f5,%f9,7
+.*: b3 db 67 24 [ ]*sxtra %f2,%f4,%f6,7
.*: b2 b8 7f a0 [ ]*srnmb 4000\(%r7\)
cefbra %f5,3,%r9,7
cdfbra %f5,3,%r9,7
- cxfbra %f5,3,%r9,7
+ cxfbra %f6,3,%r9,7
cegbra %f5,3,%r9,7
cdgbra %f5,3,%r9,7
- cxgbra %f5,3,%r9,7
+ cxgbra %f6,3,%r9,7
celfbr %f5,3,%r9,7
cdlfbr %f5,3,%r9,7
- cxlfbr %f5,3,%r9,7
+ cxlfbr %f6,3,%r9,7
celgbr %f5,3,%r9,7
cdlgbr %f5,3,%r9,7
- cxlgbr %f5,3,%r9,7
+ cxlgbr %f6,3,%r9,7
cfebra %r5,3,%f9,7
cfdbra %r5,3,%f9,7
- cfxbra %r5,3,%f9,7
+ cfxbra %r5,3,%f8,7
cgebra %r5,3,%f9,7
cgdbra %r5,3,%f9,7
- cgxbra %r5,3,%f9,7
+ cgxbra %r5,3,%f8,7
clfebr %r5,3,%f9,7
clfdbr %r5,3,%f9,7
- clfxbr %r5,3,%f9,7
+ clfxbr %r5,3,%f8,7
clgebr %r5,3,%f9,7
clgdbr %r5,3,%f9,7
- clgxbr %r5,3,%f9,7
+ clgxbr %r5,3,%f8,7
fiebra %f5,3,%f9,7
fidbra %f5,3,%f9,7
- fixbra %f5,3,%f9,7
+ fixbra %f6,3,%f8,7
ledbra %f5,3,%f9,7
- ldxbra %f5,3,%f9,7
- lexbra %f5,3,%f9,7
+ ldxbra %f6,3,%f8,7
+ lexbra %f6,3,%f8,7
adtra %f3,%f5,%f9,7
- axtra %f3,%f5,%f9,7
+ axtra %f2,%f4,%f6,7
cdgtra %f5,3,%r9,7
cdftr %f5,3,%r9,7
- cxftr %f5,3,%r9,7
- cxgtra %f5,3,%r9,7
+ cxftr %f6,3,%r9,7
+ cxgtra %f6,3,%r9,7
cdlgtr %f5,3,%r9,7
- cxlgtr %f5,3,%r9,7
+ cxlgtr %f6,3,%r9,7
cdlftr %f5,3,%r9,7
cxlftr %f5,3,%r9,7
cgdtra %r5,3,%f9,7
- cgxtra %r5,3,%f9,7
+ cgxtra %r5,3,%f8,7
cfdtr %r5,3,%f9,7
cfxtr %r5,3,%f9,7
clgdtr %r5,3,%f9,7
- clgxtr %r5,3,%f9,7
+ clgxtr %r5,3,%f8,7
clfdtr %r5,3,%f9,7
- clfxtr %r5,3,%f9,7
+ clfxtr %r5,3,%f8,7
ddtra %f3,%f5,%f9,7
- dxtra %f3,%f5,%f9,7
+ dxtra %f2,%f4,%f6,7
mdtra %f3,%f5,%f9,7
- mxtra %f3,%f5,%f9,7
+ mxtra %f2,%f4,%f6,7
sdtra %f3,%f5,%f9,7
- sxtra %f3,%f5,%f9,7
+ sxtra %f2,%f4,%f6,7
srnmb 4000(%r7)
.*: c8 60 5f ff af ff [ ]*mvcos 4095\(%r5\),4095\(%r10\),%r6
.*: b9 aa 5f 69 [ ]*lptea %r6,%r9,%r5,15
.*: b2 2b f0 69 [ ]*sske %r6,%r9,15
-.*: b9 b1 f0 69 [ ]*cu24 %r6,%r9,15
-.*: b2 a6 f0 69 [ ]*cu21 %r6,%r9,15
-.*: b9 b3 00 69 [ ]*cu42 %r6,%r9
-.*: b9 b2 00 69 [ ]*cu41 %r6,%r9
-.*: b2 a7 f0 69 [ ]*cu12 %r6,%r9,15
-.*: b9 b0 f0 69 [ ]*cu14 %r6,%r9,15
+.*: b9 b1 f0 68 [ ]*cu24 %r6,%r8,15
+.*: b2 a6 f0 68 [ ]*cu21 %r6,%r8,15
+.*: b9 b3 00 68 [ ]*cu42 %r6,%r8
+.*: b9 b2 00 68 [ ]*cu41 %r6,%r8
+.*: b2 a7 f0 68 [ ]*cu12 %r6,%r8,15
+.*: b9 b0 f0 68 [ ]*cu14 %r6,%r8,15
.*: b3 3b 60 95 [ ]*myr %f6,%f9,%f5
.*: b3 3d 60 95 [ ]*myhr %f6,%f9,%f5
.*: b3 39 60 95 [ ]*mylr %f6,%f9,%f5
mvcos 4095(%r5),4095(%r10),%r6
lptea %r6,%r9,%r5,15
sske %r6,%r9,15
- cu24 %r6,%r9,15
- cu21 %r6,%r9,15
- cu42 %r6,%r9
- cu41 %r6,%r9
- cu12 %r6,%r9,15
- cu14 %r6,%r9,15
+ cu24 %r6,%r8,15
+ cu21 %r6,%r8,15
+ cu42 %r6,%r8
+ cu41 %r6,%r8
+ cu12 %r6,%r8,15
+ cu14 %r6,%r8,15
myr %f6,%f9,%f5
myhr %f6,%f9,%f5
mylr %f6,%f9,%f5
.*: eb 96 5f ff 00 45 [ ]*bxleg %r9,%r6,4095\(%r5\)
.*: b3 a5 00 96 [ ]*cdgbr %f9,%r6
.*: b3 c5 00 96 [ ]*cdgr %f9,%r6
-.*: eb 96 5f ff 00 3e [ ]*cdsg %r9,%r6,4095\(%r5\)
+.*: eb 86 5f ff 00 3e [ ]*cdsg %r8,%r6,4095\(%r5\)
.*: b3 a4 00 96 [ ]*cegbr %f9,%r6
.*: b3 c4 00 96 [ ]*cegr %f9,%r6
.*: e3 95 af ff 00 20 [ ]*cg %r9,4095\(%r5,%r10\)
.*: b9 30 00 96 [ ]*cgfr %r9,%r6
.*: a7 9f 80 01 [ ]*cghi %r9,-32767
.*: b9 20 00 96 [ ]*cgr %r9,%r6
-.*: b3 aa f0 65 [ ]*cgxbr %r6,15,%f5
-.*: b3 ca f0 65 [ ]*cgxr %r6,15,%f5
+.*: b3 aa f0 64 [ ]*cgxbr %r6,15,%f4
+.*: b3 ca f0 64 [ ]*cgxr %r6,15,%f4
.*: e3 95 af ff 00 21 [ ]*clg %r9,4095\(%r5,%r10\)
.*: e3 95 af ff 00 31 [ ]*clgf %r9,4095\(%r5,%r10\)
.*: b9 31 00 96 [ ]*clgfr %r9,%r6
.*: eb 96 5f ff 00 30 [ ]*csg %r9,%r6,4095\(%r5\)
.*: e3 95 af ff 00 0e [ ]*cvbg %r9,4095\(%r5,%r10\)
.*: e3 95 af ff 00 2e [ ]*cvdg %r9,4095\(%r5,%r10\)
-.*: b3 a6 00 96 [ ]*cxgbr %f9,%r6
-.*: b3 c6 00 96 [ ]*cxgr %f9,%r6
-.*: e3 95 af ff 00 87 [ ]*dlg %r9,4095\(%r5,%r10\)
-.*: b9 87 00 96 [ ]*dlgr %r9,%r6
-.*: e3 95 af ff 00 0d [ ]*dsg %r9,4095\(%r5,%r10\)
-.*: e3 95 af ff 00 1d [ ]*dsgf %r9,4095\(%r5,%r10\)
-.*: b9 1d 00 96 [ ]*dsgfr %r9,%r6
-.*: b9 0d 00 96 [ ]*dsgr %r9,%r6
+.*: b3 a6 00 86 [ ]*cxgbr %f8,%r6
+.*: b3 c6 00 86 [ ]*cxgr %f8,%r6
+.*: e3 85 af ff 00 87 [ ]*dlg %r8,4095\(%r5,%r10\)
+.*: b9 87 00 86 [ ]*dlgr %r8,%r6
+.*: e3 85 af ff 00 0d [ ]*dsg %r8,4095\(%r5,%r10\)
+.*: e3 85 af ff 00 1d [ ]*dsgf %r8,4095\(%r5,%r10\)
+.*: b9 1d 00 86 [ ]*dsgfr %r8,%r6
+.*: b9 0d 00 86 [ ]*dsgr %r8,%r6
.*: b9 0e 00 96 [ ]*eregg %r9,%r6
.*: b9 9d 00 90 [ ]*esea %r9
.*: eb 9a 5f ff 00 80 [ ]*icmh %r9,10,4095\(%r5\)
.*: b9 01 00 96 [ ]*lngr %r9,%r6
.*: b9 10 00 96 [ ]*lpgfr %r9,%r6
.*: b9 00 00 96 [ ]*lpgr %r9,%r6
-.*: e3 95 af ff 00 8f [ ]*lpq %r9,4095\(%r5,%r10\)
+.*: e3 85 af ff 00 8f [ ]*lpq %r8,4095\(%r5,%r10\)
.*: b2 b2 5f ff [ ]*lpswe 4095\(%r5\)
.*: e3 95 af ff 00 03 [ ]*lrag %r9,4095\(%r5,%r10\)
.*: e3 95 af ff 00 0f [ ]*lrvg %r9,4095\(%r5,%r10\)
.*: b9 02 00 96 [ ]*ltgr %r9,%r6
.*: b9 05 00 96 [ ]*lurag %r9,%r6
.*: a7 9d 80 01 [ ]*mghi %r9,-32767
-.*: e3 95 af ff 00 86 [ ]*mlg %r9,4095\(%r5,%r10\)
-.*: b9 86 00 96 [ ]*mlgr %r9,%r6
+.*: e3 85 af ff 00 86 [ ]*mlg %r8,4095\(%r5,%r10\)
+.*: b9 86 00 86 [ ]*mlgr %r8,%r6
.*: e3 95 af ff 00 0c [ ]*msg %r9,4095\(%r5,%r10\)
.*: e3 95 af ff 00 1c [ ]*msgf %r9,4095\(%r5,%r10\)
.*: b9 1c 00 96 [ ]*msgfr %r9,%r6
bxleg %r9,%r6,4095(%r5)
cdgbr %f9,%r6
cdgr %f9,%r6
- cdsg %r9,%r6,4095(%r5)
+ cdsg %r8,%r6,4095(%r5)
cegbr %f9,%r6
cegr %f9,%r6
cg %r9,4095(%r5,%r10)
cgfr %r9,%r6
cghi %r9,-32767
cgr %r9,%r6
- cgxbr %r6,15,%f5
- cgxr %r6,15,%f5
+ cgxbr %r6,15,%f4
+ cgxr %r6,15,%f4
clg %r9,4095(%r5,%r10)
clgf %r9,4095(%r5,%r10)
clgfr %r9,%r6
csg %r9,%r6,4095(%r5)
cvbg %r9,4095(%r5,%r10)
cvdg %r9,4095(%r5,%r10)
- cxgbr %f9,%r6
- cxgr %f9,%r6
- dlg %r9,4095(%r5,%r10)
- dlgr %r9,%r6
- dsg %r9,4095(%r5,%r10)
- dsgf %r9,4095(%r5,%r10)
- dsgfr %r9,%r6
- dsgr %r9,%r6
+ cxgbr %f8,%r6
+ cxgr %f8,%r6
+ dlg %r8,4095(%r5,%r10)
+ dlgr %r8,%r6
+ dsg %r8,4095(%r5,%r10)
+ dsgf %r8,4095(%r5,%r10)
+ dsgfr %r8,%r6
+ dsgr %r8,%r6
eregg %r9,%r6
esea %r9
icmh %r9,10,4095(%r5)
lngr %r9,%r6
lpgfr %r9,%r6
lpgr %r9,%r6
- lpq %r9,4095(%r5,%r10)
+ lpq %r8,4095(%r5,%r10)
lpswe 4095(%r5)
lrag %r9,4095(%r5,%r10)
lrvg %r9,4095(%r5,%r10)
ltgr %r9,%r6
lurag %r9,%r6
mghi %r9,-32767
- mlg %r9,4095(%r5,%r10)
- mlgr %r9,%r6
+ mlg %r8,4095(%r5,%r10)
+ mlgr %r8,%r6
msg %r9,4095(%r5,%r10)
msgf %r9,4095(%r5,%r10)
msgfr %r9,%r6
.*: e3 60 50 00 80 46 [ ]*bctg %r6,-524288\(%r5\)
.*: eb 69 50 00 80 44 [ ]*bxhg %r6,%r9,-524288\(%r5\)
.*: eb 69 50 00 80 45 [ ]*bxleg %r6,%r9,-524288\(%r5\)
-.*: eb 69 50 00 80 3e [ ]*cdsg %r6,%r9,-524288\(%r5\)
-.*: eb 69 50 00 80 31 [ ]*cdsy %r6,%r9,-524288\(%r5\)
+.*: eb 68 50 00 80 3e [ ]*cdsg %r6,%r8,-524288\(%r5\)
+.*: eb 68 50 00 80 31 [ ]*cdsy %r6,%r8,-524288\(%r5\)
.*: e3 65 a0 00 80 20 [ ]*cg %r6,-524288\(%r5,%r10\)
.*: e3 65 a0 00 80 30 [ ]*cgf %r6,-524288\(%r5,%r10\)
.*: e3 65 a0 00 80 79 [ ]*chy %r6,-524288\(%r5,%r10\)
.*: e3 65 a0 00 80 0c [ ]*msg %r6,-524288\(%r5,%r10\)
.*: e3 65 a0 00 80 1c [ ]*msgf %r6,-524288\(%r5,%r10\)
.*: e3 65 a0 00 80 51 [ ]*msy %r6,-524288\(%r5,%r10\)
-.*: eb 69 50 00 80 8e [ ]*mvclu %r6,%r9,-524288\(%r5\)
+.*: eb 68 50 00 80 8e [ ]*mvclu %r6,%r8,-524288\(%r5\)
.*: eb ff 50 00 80 52 [ ]*mviy -524288\(%r5\),255
.*: e3 65 a0 00 80 80 [ ]*ng %r6,-524288\(%r5,%r10\)
.*: eb ff 50 00 80 54 [ ]*niy -524288\(%r5\),255
bctg %r6,-524288(%r5)
bxhg %r6,%r9,-524288(%r5)
bxleg %r6,%r9,-524288(%r5)
- cdsg %r6,%r9,-524288(%r5)
- cdsy %r6,%r9,-524288(%r5)
+ cdsg %r6,%r8,-524288(%r5)
+ cdsy %r6,%r8,-524288(%r5)
cg %r6,-524288(%r5,%r10)
cgf %r6,-524288(%r5,%r10)
chy %r6,-524288(%r5,%r10)
msg %r6,-524288(%r5,%r10)
msgf %r6,-524288(%r5,%r10)
msy %r6,-524288(%r5,%r10)
- mvclu %r6,%r9,-524288(%r5)
+ mvclu %r6,%r8,-524288(%r5)
mviy -524288(%r5),255
ng %r6,-524288(%r5,%r10)
niy -524288(%r5),255
+2011-05-24 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * opcode/s390.h: Add S390_OPCODE_REG_EVEN flag.
+
2011-05-17 Alan Modra <amodra@gmail.com>
PR ld/12760
the instruction may be optional. */
#define S390_OPERAND_OPTIONAL 0x400
+/* The operand needs to be an even register number. */
+#define S390_OPERAND_REG_EVEN 0x800
+
#endif /* S390_H */
+2011-05-24 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * s390-opc.c: Add new instruction types marking register pair
+ operands.
+ * s390-opc.txt: Match instructions having register pair operands
+ to the new instruction types.
+
2011-05-19 Nick Clifton <nickc@redhat.com>
* v850-opc.c (cmpf.[sd]): Reverse the order of the reg1 and reg2
#define R_12 2 /* GPR starting at position 12 */
{ 4, 12, S390_OPERAND_GPR },
#define RO_12 3 /* optional GPR starting at position 12 */
- { 4, 12, S390_OPERAND_GPR|S390_OPERAND_OPTIONAL },
+ { 4, 12, S390_OPERAND_GPR | S390_OPERAND_OPTIONAL },
#define R_16 4 /* GPR starting at position 16 */
{ 4, 16, S390_OPERAND_GPR },
#define R_20 5 /* GPR starting at position 20 */
#define R_32 9 /* GPR starting at position 32 */
{ 4, 32, S390_OPERAND_GPR },
+/* General purpose register pair operands. */
+
+#define RE_8 10 /* GPR starting at position 8 */
+ { 4, 8, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_12 11 /* GPR starting at position 12 */
+ { 4, 12, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_16 12 /* GPR starting at position 16 */
+ { 4, 16, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_20 13 /* GPR starting at position 20 */
+ { 4, 20, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_24 14 /* GPR starting at position 24 */
+ { 4, 24, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_28 15 /* GPR starting at position 28 */
+ { 4, 28, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+#define RE_32 16 /* GPR starting at position 32 */
+ { 4, 32, S390_OPERAND_GPR | S390_OPERAND_REG_EVEN },
+
+
/* Floating point register operands. */
-#define F_8 10 /* FPR starting at position 8 */
+#define F_8 17 /* FPR starting at position 8 */
{ 4, 8, S390_OPERAND_FPR },
-#define F_12 11 /* FPR starting at position 12 */
+#define F_12 18 /* FPR starting at position 12 */
{ 4, 12, S390_OPERAND_FPR },
-#define F_16 12 /* FPR starting at position 16 */
+#define F_16 19 /* FPR starting at position 16 */
{ 4, 16, S390_OPERAND_FPR },
-#define F_20 13 /* FPR starting at position 16 */
+#define F_20 20 /* FPR starting at position 16 */
{ 4, 16, S390_OPERAND_FPR },
-#define F_24 14 /* FPR starting at position 24 */
+#define F_24 21 /* FPR starting at position 24 */
{ 4, 24, S390_OPERAND_FPR },
-#define F_28 15 /* FPR starting at position 28 */
+#define F_28 22 /* FPR starting at position 28 */
{ 4, 28, S390_OPERAND_FPR },
-#define F_32 16 /* FPR starting at position 32 */
+#define F_32 23 /* FPR starting at position 32 */
{ 4, 32, S390_OPERAND_FPR },
+/* Floating point register pair operands. */
+
+#define FE_8 24 /* FPR starting at position 8 */
+ { 4, 8, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_12 25 /* FPR starting at position 12 */
+ { 4, 12, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_16 26 /* FPR starting at position 16 */
+ { 4, 16, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_20 27 /* FPR starting at position 16 */
+ { 4, 16, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_24 28 /* FPR starting at position 24 */
+ { 4, 24, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_28 29 /* FPR starting at position 28 */
+ { 4, 28, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+#define FE_32 30 /* FPR starting at position 32 */
+ { 4, 32, S390_OPERAND_FPR | S390_OPERAND_REG_EVEN },
+
+
/* Access register operands. */
-#define A_8 17 /* Access reg. starting at position 8 */
+#define A_8 31 /* Access reg. starting at position 8 */
{ 4, 8, S390_OPERAND_AR },
-#define A_12 18 /* Access reg. starting at position 12 */
+#define A_12 32 /* Access reg. starting at position 12 */
{ 4, 12, S390_OPERAND_AR },
-#define A_24 19 /* Access reg. starting at position 24 */
+#define A_24 33 /* Access reg. starting at position 24 */
{ 4, 24, S390_OPERAND_AR },
-#define A_28 20 /* Access reg. starting at position 28 */
+#define A_28 34 /* Access reg. starting at position 28 */
{ 4, 28, S390_OPERAND_AR },
/* Control register operands. */
-#define C_8 21 /* Control reg. starting at position 8 */
+#define C_8 35 /* Control reg. starting at position 8 */
{ 4, 8, S390_OPERAND_CR },
-#define C_12 22 /* Control reg. starting at position 12 */
+#define C_12 36 /* Control reg. starting at position 12 */
{ 4, 12, S390_OPERAND_CR },
/* Base register operands. */
-#define B_16 23 /* Base register starting at position 16 */
- { 4, 16, S390_OPERAND_BASE|S390_OPERAND_GPR },
-#define B_32 24 /* Base register starting at position 32 */
- { 4, 32, S390_OPERAND_BASE|S390_OPERAND_GPR },
+#define B_16 37 /* Base register starting at position 16 */
+ { 4, 16, S390_OPERAND_BASE | S390_OPERAND_GPR },
+#define B_32 38 /* Base register starting at position 32 */
+ { 4, 32, S390_OPERAND_BASE | S390_OPERAND_GPR },
-#define X_12 25 /* Index register starting at position 12 */
- { 4, 12, S390_OPERAND_INDEX|S390_OPERAND_GPR },
+#define X_12 39 /* Index register starting at position 12 */
+ { 4, 12, S390_OPERAND_INDEX | S390_OPERAND_GPR },
/* Address displacement operands. */
-#define D_20 26 /* Displacement starting at position 20 */
+#define D_20 40 /* Displacement starting at position 20 */
{ 12, 20, S390_OPERAND_DISP },
-#define DO_20 27 /* optional Displ. starting at position 20 */
- { 12, 20, S390_OPERAND_DISP|S390_OPERAND_OPTIONAL },
-#define D_36 28 /* Displacement starting at position 36 */
+#define DO_20 41 /* optional Displ. starting at position 20 */
+ { 12, 20, S390_OPERAND_DISP | S390_OPERAND_OPTIONAL },
+#define D_36 42 /* Displacement starting at position 36 */
{ 12, 36, S390_OPERAND_DISP },
-#define D20_20 29 /* 20 bit displacement starting at 20 */
- { 20, 20, S390_OPERAND_DISP|S390_OPERAND_SIGNED },
+#define D20_20 43 /* 20 bit displacement starting at 20 */
+ { 20, 20, S390_OPERAND_DISP | S390_OPERAND_SIGNED },
/* Length operands. */
-#define L4_8 30 /* 4 bit length starting at position 8 */
+#define L4_8 44 /* 4 bit length starting at position 8 */
{ 4, 8, S390_OPERAND_LENGTH },
-#define L4_12 31 /* 4 bit length starting at position 12 */
+#define L4_12 45 /* 4 bit length starting at position 12 */
{ 4, 12, S390_OPERAND_LENGTH },
-#define L8_8 32 /* 8 bit length starting at position 8 */
+#define L8_8 46 /* 8 bit length starting at position 8 */
{ 8, 8, S390_OPERAND_LENGTH },
/* Signed immediate operands. */
-#define I8_8 33 /* 8 bit signed value starting at 8 */
+#define I8_8 47 /* 8 bit signed value starting at 8 */
{ 8, 8, S390_OPERAND_SIGNED },
-#define I8_32 34 /* 8 bit signed value starting at 32 */
+#define I8_32 48 /* 8 bit signed value starting at 32 */
{ 8, 32, S390_OPERAND_SIGNED },
-#define I16_16 35 /* 16 bit signed value starting at 16 */
+#define I16_16 49 /* 16 bit signed value starting at 16 */
{ 16, 16, S390_OPERAND_SIGNED },
-#define I16_32 36 /* 16 bit signed value starting at 32 */
+#define I16_32 50 /* 16 bit signed value starting at 32 */
{ 16, 32, S390_OPERAND_SIGNED },
-#define I32_16 37 /* 32 bit signed value starting at 16 */
+#define I32_16 51 /* 32 bit signed value starting at 16 */
{ 32, 16, S390_OPERAND_SIGNED },
/* Unsigned immediate operands. */
-#define U4_8 38 /* 4 bit unsigned value starting at 8 */
+#define U4_8 52 /* 4 bit unsigned value starting at 8 */
{ 4, 8, 0 },
-#define U4_12 39 /* 4 bit unsigned value starting at 12 */
+#define U4_12 53 /* 4 bit unsigned value starting at 12 */
{ 4, 12, 0 },
-#define U4_16 40 /* 4 bit unsigned value starting at 16 */
+#define U4_16 54 /* 4 bit unsigned value starting at 16 */
{ 4, 16, 0 },
-#define U4_20 41 /* 4 bit unsigned value starting at 20 */
+#define U4_20 55 /* 4 bit unsigned value starting at 20 */
{ 4, 20, 0 },
-#define U4_32 42 /* 4 bit unsigned value starting at 32 */
+#define U4_32 56 /* 4 bit unsigned value starting at 32 */
{ 4, 32, 0 },
-#define U8_8 43 /* 8 bit unsigned value starting at 8 */
+#define U8_8 57 /* 8 bit unsigned value starting at 8 */
{ 8, 8, 0 },
-#define U8_16 44 /* 8 bit unsigned value starting at 16 */
+#define U8_16 58 /* 8 bit unsigned value starting at 16 */
{ 8, 16, 0 },
-#define U8_24 45 /* 8 bit unsigned value starting at 24 */
+#define U8_24 59 /* 8 bit unsigned value starting at 24 */
{ 8, 24, 0 },
-#define U8_32 46 /* 8 bit unsigned value starting at 32 */
+#define U8_32 60 /* 8 bit unsigned value starting at 32 */
{ 8, 32, 0 },
-#define U16_16 47 /* 16 bit unsigned value starting at 16 */
+#define U16_16 61 /* 16 bit unsigned value starting at 16 */
{ 16, 16, 0 },
-#define U16_32 48 /* 16 bit unsigned value starting at 32 */
+#define U16_32 62 /* 16 bit unsigned value starting at 32 */
{ 16, 32, 0 },
-#define U32_16 49 /* 32 bit unsigned value starting at 16 */
+#define U32_16 63 /* 32 bit unsigned value starting at 16 */
{ 32, 16, 0 },
/* PC-relative address operands. */
-#define J16_16 50 /* PC relative jump offset at 16 */
+#define J16_16 64 /* PC relative jump offset at 16 */
{ 16, 16, S390_OPERAND_PCREL },
-#define J32_16 51 /* PC relative long offset at 16 */
+#define J32_16 65 /* PC relative long offset at 16 */
{ 32, 16, S390_OPERAND_PCREL },
/* Conditional mask operands. */
-#define M_16OPT 52 /* 4 bit optional mask starting at 16 */
+#define M_16OPT 66 /* 4 bit optional mask starting at 16 */
{ 4, 16, S390_OPERAND_OPTIONAL },
};
c - control register
d - displacement, 12 bit
 f - floating point register
+ fe - even numbered floating point register operand
i - signed integer, 4, 8, 16 or 32 bit
l - length, 4 or 8 bit
p - pc relative
r - general purpose register
+ ro - optional register operand
+ re - even numbered register operand
u - unsigned integer, 4, 8, 16 or 32 bit
m - mode field, 4 bit
0 - operand skipped.
#define INSTR_RRE_AA 4, { A_24,A_28,0,0,0,0 } /* e.g. cpya */
#define INSTR_RRE_AR 4, { A_24,R_28,0,0,0,0 } /* e.g. sar */
#define INSTR_RRE_F0 4, { F_24,0,0,0,0,0 } /* e.g. sqer */
+#define INSTR_RRE_FE0 4, { FE_24,0,0,0,0,0 } /* e.g. lzxr */
#define INSTR_RRE_FF 4, { F_24,F_28,0,0,0,0 } /* e.g. debr */
+#define INSTR_RRE_FEF 4, { FE_24,F_28,0,0,0,0 } /* e.g. lxdbr */
+#define INSTR_RRE_FFE 4, { F_24,FE_28,0,0,0,0 } /* e.g. lexr */
+#define INSTR_RRE_FEFE 4, { FE_24,FE_28,0,0,0,0 } /* e.g. dxr */
#define INSTR_RRE_R0 4, { R_24,0,0,0,0,0 } /* e.g. ipm */
#define INSTR_RRE_RA 4, { R_24,A_28,0,0,0,0 } /* e.g. ear */
#define INSTR_RRE_RF 4, { R_24,F_28,0,0,0,0 } /* e.g. cefbr */
+#define INSTR_RRE_RFE 4, { R_24,FE_28,0,0,0,0 } /* e.g. csxtr */
#define INSTR_RRE_RR 4, { R_24,R_28,0,0,0,0 } /* e.g. lura */
+#define INSTR_RRE_RER 4, { RE_24,R_28,0,0,0,0 } /* e.g. tre */
+#define INSTR_RRE_RERE 4, { RE_24,RE_28,0,0,0,0 } /* e.g. cuse */
#define INSTR_RRE_FR 4, { F_24,R_28,0,0,0,0 } /* e.g. ldgr */
+#define INSTR_RRE_FER 4, { FE_24,R_28,0,0,0,0 } /* e.g. cxfbr */
/* Actually efpc and sfpc do not take an optional operand.
This is just a workaround for existing code e.g. glibc. */
#define INSTR_RRE_RR_OPT 4, { R_24,RO_28,0,0,0,0 } /* efpc, sfpc */
#define INSTR_RRF_F0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. madbr */
+#define INSTR_RRF_FE0FF 4, { FE_16,F_24,F_28,0,0,0 } /* e.g. myr */
#define INSTR_RRF_F0FF2 4, { F_24,F_16,F_28,0,0,0 } /* e.g. cpsdr */
#define INSTR_RRF_F0FR 4, { F_24,F_16,R_28,0,0,0 } /* e.g. iedtr */
+#define INSTR_RRF_FE0FER 4, { FE_24,FE_16,R_28,0,0,0 } /* e.g. iextr */
#define INSTR_RRF_FUFF 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. didbr */
+#define INSTR_RRF_FEUFEFE 4, { FE_24,FE_16,FE_28,U4_20,0,0 } /* e.g. qaxtr */
#define INSTR_RRF_FUFF2 4, { F_24,F_28,F_16,U4_20,0,0 } /* e.g. adtra */
+#define INSTR_RRF_FEUFEFE2 4, { FE_24,FE_28,FE_16,U4_20,0,0 } /* e.g. axtra */
#define INSTR_RRF_RURR 4, { R_24,R_28,R_16,U4_20,0,0 } /* e.g. .insn */
#define INSTR_RRF_R0RR 4, { R_24,R_16,R_28,0,0,0 } /* e.g. idte */
#define INSTR_RRF_R0RR2 4, { R_24,R_28,R_16,0,0,0 } /* e.g. ark */
#define INSTR_RRF_U0FF 4, { F_24,U4_16,F_28,0,0,0 } /* e.g. fixr */
+#define INSTR_RRF_U0FEFE 4, { FE_24,U4_16,FE_28,0,0,0 } /* e.g. fixbr */
#define INSTR_RRF_U0RF 4, { R_24,U4_16,F_28,0,0,0 } /* e.g. cfebr */
+#define INSTR_RRF_U0RFE 4, { R_24,U4_16,FE_28,0,0,0 } /* e.g. cfxbr */
#define INSTR_RRF_UUFF 4, { F_24,U4_16,F_28,U4_20,0,0 } /* e.g. fidtr */
+#define INSTR_RRF_UUFFE 4, { F_24,U4_16,FE_28,U4_20,0,0 } /* e.g. ldxtr */
+#define INSTR_RRF_UUFEFE 4, { FE_24,U4_16,FE_28,U4_20,0,0 } /* e.g. fixtr */
#define INSTR_RRF_0UFF 4, { F_24,F_28,U4_20,0,0,0 } /* e.g. ldetr */
+#define INSTR_RRF_0UFEF 4, { FE_24,F_28,U4_20,0,0,0 } /* e.g. lxdtr */
#define INSTR_RRF_FFRU 4, { F_24,F_16,R_28,U4_20,0,0 } /* e.g. rrdtr */
+#define INSTR_RRF_FEFERU 4, { FE_24,FE_16,R_28,U4_20,0,0 } /* e.g. rrxtr */
#define INSTR_RRF_M0RR 4, { R_24,R_28,M_16OPT,0,0,0 } /* e.g. sske */
+#define INSTR_RRF_M0RER 4, { RE_24,R_28,M_16OPT,0,0,0 } /* e.g. trte */
+#define INSTR_RRF_M0RERE 4, { RE_24,RE_28,M_16OPT,0,0,0 } /* e.g. troo */
#define INSTR_RRF_U0RR 4, { R_24,R_28,U4_16,0,0,0 } /* e.g. clrt */
#define INSTR_RRF_00RR 4, { R_24,R_28,0,0,0,0 } /* e.g. clrtne */
#define INSTR_RRF_UUFR 4, { F_24,U4_16,R_28,U4_20,0,0 } /* e.g. cdgtra */
+#define INSTR_RRF_UUFER 4, { FE_24,U4_16,R_28,U4_20,0,0 } /* e.g. cxfbra */
#define INSTR_RRF_UURF 4, { R_24,U4_16,F_28,U4_20,0,0 } /* e.g. cgdtra */
+#define INSTR_RRF_UURFE 4, { R_24,U4_16,FE_28,U4_20,0,0 } /* e.g. cfxbra */
#define INSTR_RR_0R 2, { R_12, 0,0,0,0,0 } /* e.g. br */
#define INSTR_RR_0R_OPT 2, { RO_12, 0,0,0,0,0 } /* e.g. nopr */
#define INSTR_RR_FF 2, { F_8,F_12,0,0,0,0 } /* e.g. adr */
+#define INSTR_RR_FEF 2, { FE_8,F_12,0,0,0,0 } /* e.g. mxdr */
+#define INSTR_RR_FFE 2, { F_8,FE_12,0,0,0,0 } /* e.g. ldxr */
+#define INSTR_RR_FEFE 2, { FE_8,FE_12,0,0,0,0 } /* e.g. axr */
#define INSTR_RR_R0 2, { R_8, 0,0,0,0,0 } /* e.g. spm */
#define INSTR_RR_RR 2, { R_8,R_12,0,0,0,0 } /* e.g. lr */
+#define INSTR_RR_RER 2, { RE_8,R_12,0,0,0,0 } /* e.g. dr */
#define INSTR_RR_U0 2, { U8_8, 0,0,0,0,0 } /* e.g. svc */
#define INSTR_RR_UR 2, { U4_8,R_12,0,0,0,0 } /* e.g. bcr */
#define INSTR_RRR_F0FF 4, { F_24,F_28,F_16,0,0,0 } /* e.g. ddtr */
+#define INSTR_RRR_FE0FEFE 4, { FE_24,FE_28,FE_16,0,0,0 } /* e.g. axtr */
#define INSTR_RRS_RRRDU 6, { R_8,R_12,U4_32,D_20,B_16 } /* e.g. crb */
#define INSTR_RRS_RRRD0 6, { R_8,R_12,D_20,B_16,0 } /* e.g. crbne */
#define INSTR_RSE_RRRD 6, { R_8,R_12,D_20,B_16,0,0 } /* e.g. lmh */
+#define INSTR_RSE_RERERD 6, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. mvclu */
#define INSTR_RSE_CCRD 6, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lmh */
#define INSTR_RSE_RURD 6, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icmh */
#define INSTR_RSL_R0RD 6, { D_20,L4_8,B_16,0,0,0 } /* e.g. tp */
#define INSTR_RSI_RRP 4, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxh */
#define INSTR_RSY_RRRD 6, { R_8,R_12,D20_20,B_16,0,0 } /* e.g. stmy */
+#define INSTR_RSY_RERERD 6, { RE_8,RE_12,D20_20,B_16,0,0 } /* e.g. cdsy */
#define INSTR_RSY_RURD 6, { R_8,U4_12,D20_20,B_16,0,0 } /* e.g. icmh */
#define INSTR_RSY_AARD 6, { A_8,A_12,D20_20,B_16,0,0 } /* e.g. lamy */
#define INSTR_RSY_CCRD 6, { C_8,C_12,D20_20,B_16,0,0 } /* e.g. lamy */
#define INSTR_RS_AARD 4, { A_8,A_12,D_20,B_16,0,0 } /* e.g. lam */
#define INSTR_RS_CCRD 4, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lctl */
#define INSTR_RS_R0RD 4, { R_8,D_20,B_16,0,0,0 } /* e.g. sll */
+#define INSTR_RS_RE0RD 4, { RE_8,D_20,B_16,0,0,0 } /* e.g. slda */
#define INSTR_RS_RRRD 4, { R_8,R_12,D_20,B_16,0,0 } /* e.g. cs */
+#define INSTR_RS_RERERD 4, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. cds */
#define INSTR_RS_RURD 4, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icm */
#define INSTR_RXE_FRRD 6, { F_8,D_20,X_12,B_16,0,0 } /* e.g. axbr */
+#define INSTR_RXE_FERRD 6, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. lxdb */
#define INSTR_RXE_RRRD 6, { R_8,D_20,X_12,B_16,0,0 } /* e.g. lg */
+#define INSTR_RXE_RERRD 6, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. dsg */
#define INSTR_RXF_FRRDF 6, { F_32,F_8,D_20,X_12,B_16,0 } /* e.g. madb */
+#define INSTR_RXF_FRRDFE 6, { FE_32,F_8,D_20,X_12,B_16,0 } /* e.g. my */
+#define INSTR_RXF_FERRDFE 6, { FE_32,FE_8,D_20,X_12,B_16,0 } /* e.g. slxt */
#define INSTR_RXF_RRRDR 6, { R_32,R_8,D_20,X_12,B_16,0 } /* e.g. .insn */
#define INSTR_RXY_RRRD 6, { R_8,D20_20,X_12,B_16,0,0 } /* e.g. ly */
+#define INSTR_RXY_RERRD 6, { RE_8,D20_20,X_12,B_16,0,0 } /* e.g. dsg */
#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */
#define INSTR_RXY_URRD 6, { U4_8,D20_20,X_12,B_16,0,0 } /* e.g. pfd */
#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */
#define INSTR_RX_0RRD_OPT 4, { DO_20,X_12,B_16,0,0,0 } /* e.g. nop */
#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */
+#define INSTR_RX_FERRD 4, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. mxd */
#define INSTR_RX_RRRD 4, { R_8,D_20,X_12,B_16,0,0 } /* e.g. l */
+#define INSTR_RX_RERRD 4, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. d */
#define INSTR_RX_URRD 4, { U4_8,D_20,X_12,B_16,0,0 } /* e.g. bc */
#define INSTR_SI_URD 4, { D_20,B_16,U8_8,0,0,0 } /* e.g. cli */
#define INSTR_SIY_URD 6, { D20_20,B_16,U8_8,0,0,0 } /* e.g. tmy */
#define INSTR_SS_RRRDRD2 6, { R_8,D_20,B_16,R_12,D_36,B_32 } /* e.g. plo */
#define INSTR_SS_RRRDRD3 6, { R_8,R_12,D_20,B_16,D_36,B_32 } /* e.g. lmd */
#define INSTR_SSF_RRDRD 6, { D_20,B_16,D_36,B_32,R_8,0 } /* e.g. mvcos */
-#define INSTR_SSF_RRDRD2 6, { R_8,D_20,B_16,D_36,B_32,0 } /* e.g. lpd */
+#define INSTR_SSF_RRDRD2 6, { R_8,D_20,B_16,D_36,B_32,0 }
+#define INSTR_SSF_RERDRD2 6, { RE_8,D_20,B_16,D_36,B_32,0 } /* e.g. lpd */
#define INSTR_S_00 4, { 0,0,0,0,0,0 } /* e.g. hsch */
#define INSTR_S_RD 4, { D_20,B_16,0,0,0,0 } /* e.g. lpsw */
#define MASK_RRE_AA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_AR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_F0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 }
+#define MASK_RRE_FE0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 }
#define MASK_RRE_FF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_FEF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_FFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_FEFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_R0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 }
#define MASK_RRE_RA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_RF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_RFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_RER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_RERE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_FR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
+#define MASK_RRE_FER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRE_RR_OPT { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRF_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_FE0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_F0FF2 { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_F0FR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_FE0FER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_FUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_FEUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_FUFF2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_FEUFEFE2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_RURR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_R0RR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_R0RR2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_U0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_U0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_U0RF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_U0RFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_UUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_UUFFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_UUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_0UFF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 }
+#define MASK_RRF_0UFEF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 }
#define MASK_RRF_FFRU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_FEFERU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_M0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_M0RER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRF_M0RERE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_U0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_00RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
#define MASK_RRF_UUFR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_UUFER { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_UURF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_UURFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_0R { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_0R_OPT { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_FF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RR_FEF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RR_FFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RR_FEFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_R0 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_RR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RR_RER { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_U0 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RR_UR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRR_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
+#define MASK_RRR_FE0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRS_RRRDU { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff }
#define MASK_RRS_RRRD0 { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RSE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
+#define MASK_RSE_RERERD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RSE_CCRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RSE_RURD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RSL_R0RD { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff }
#define MASK_RS_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RS_RE0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RS_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RSY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RSY_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSY_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSY_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSY_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSY_RDRM { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSY_RDR0 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXE_FRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
+#define MASK_RXE_FERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RXE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
+#define MASK_RXE_RERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff }
#define MASK_RXF_FRRDF { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff }
+#define MASK_RXF_FRRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff }
+#define MASK_RXF_FERRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff }
#define MASK_RXF_RRRDR { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff }
#define MASK_RXY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RXY_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXY_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_0RRD_OPT { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RX_FERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RX_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SI_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SIY_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_SS_RRRDRD3 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SSF_RRDRD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SSF_RRDRD2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_SSF_RERDRD2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
#define MASK_S_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 }
#define MASK_S_RD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
3e aur RR_FF "add unnormalized (short)" g5 esa,zarch
6e aw RX_FRRD "add unnormalized (long)" g5 esa,zarch
2e awr RR_FF "add unnormalized (long)" g5 esa,zarch
-36 axr RR_FF "add normalized" g5 esa,zarch
+36 axr RR_FEFE "add normalized" g5 esa,zarch
b240 bakr RRE_RR "branch and stack" g5 esa,zarch
45 bal RX_RRRD "branch and link" g5 esa,zarch
05 balr RR_RR "branch and link" g5 esa,zarch
59 c RX_RRRD "compare" g5 esa,zarch
69 cd RX_FRRD "compare (long)" g5 esa,zarch
29 cdr RR_FF "compare (long)" g5 esa,zarch
-bb cds RS_RRRD "compare double and swap" g5 esa,zarch
+bb cds RS_RERERD "compare double and swap" g5 esa,zarch
79 ce RX_FRRD "compare (short)" g5 esa,zarch
39 cer RR_FF "compare (short)" g5 esa,zarch
b21a cfc S_RD "compare and form codeword" g5 esa,zarch
19 cr RR_RR "compare" g5 esa,zarch
ba cs RS_RRRD "compare and swap" g5 esa,zarch
b230 csch S_00 "clear subchannel" g5 esa,zarch
-b257 cuse RRE_RR "compare until substring equal" g5 esa,zarch
+b257 cuse RRE_RERE "compare until substring equal" g5 esa,zarch
b250 csp RRE_RR "compare and swap and purge" g5 esa,zarch
4f cvb RX_RRRD "convert to binary" g5 esa,zarch
4e cvd RX_RRRD "convert to decimal" g5 esa,zarch
-5d d RX_RRRD "divide" g5 esa,zarch
+5d d RX_RERRD "divide" g5 esa,zarch
6d dd RX_FRRD "divide (long)" g5 esa,zarch
2d ddr RR_FF "divide (long)" g5 esa,zarch
7d de RX_FRRD "divide (short)" g5 esa,zarch
3d der RR_FF "divide (short)" g5 esa,zarch
83 diag RS_RRRD "diagnose" g5 esa,zarch
fd dp SS_LLRDRD "divide decimal" g5 esa,zarch
-1d dr RR_RR "divide" g5 esa,zarch
-b22d dxr RRE_FF "divide (ext.)" g5 esa,zarch
+1d dr RR_RER "divide" g5 esa,zarch
+b22d dxr RRE_FEFE "divide (ext.)" g5 esa,zarch
b24f ear RRE_RA "extract access" g5 esa,zarch
de ed SS_L0RDRD "edit" g5 esa,zarch
df edmk SS_L0RDRD "edit and mark" g5 esa,zarch
82 lpsw S_RD "load PSW" g5 esa,zarch
18 lr RR_RR "load" g5 esa,zarch
b1 lra RX_RRRD "load real address" g5 esa,zarch
-25 ldxr RR_FF "load rounded (ext. to long)" g5 esa,zarch
-25 lrdr RR_FF "load rounded (ext. to long)" g5 esa,zarch
+25 ldxr RR_FFE "load rounded (ext. to long)" g5 esa,zarch
+25 lrdr RR_FFE "load rounded (ext. to long)" g5 esa,zarch
35 ledr RR_FF "load rounded (long to short)" g5 esa,zarch
35 lrer RR_FF "load rounded (long to short)" g5 esa,zarch
22 ltdr RR_FF "load and test (long)" g5 esa,zarch
32 lter RR_FF "load and test (short)" g5 esa,zarch
12 ltr RR_RR "load and test" g5 esa,zarch
b24b lura RRE_RR "load using real address" g5 esa,zarch
-5c m RX_RRRD "multiply" g5 esa,zarch
+5c m RX_RERRD "multiply" g5 esa,zarch
af mc SI_URD "monitor call" g5 esa,zarch
6c md RX_FRRD "multiply (long)" g5 esa,zarch
2c mdr RR_FF "multiply (long)" g5 esa,zarch
3c mer RR_FF "multiply (short to long)" g5 esa,zarch
4c mh RX_RRRD "multiply halfword" g5 esa,zarch
fc mp SS_LLRDRD "multiply decimal" g5 esa,zarch
-1c mr RR_RR "multiply" g5 esa,zarch
+1c mr RR_RER "multiply" g5 esa,zarch
b232 msch S_RD "modify subchannel" g5 esa,zarch
b247 msta RRE_R0 "modify stacked state" g5 esa,zarch
d2 mvc SS_L0RDRD "move" g5 esa,zarch
b254 mvpg RRE_RR "move page" g5 esa,zarch
b255 mvst RRE_RR "move string" g5 esa,zarch
d3 mvz SS_L0RDRD "move zones" g5 esa,zarch
-67 mxd RX_FRRD "multiply (long to ext.)" g5 esa,zarch
-27 mxdr RR_FF "multiply (long to ext.)" g5 esa,zarch
-26 mxr RR_FF "multiply (ext.)" g5 esa,zarch
+67 mxd RX_FERRD "multiply (long to ext.)" g5 esa,zarch
+27 mxdr RR_FEF "multiply (long to ext.)" g5 esa,zarch
+26 mxr RR_FEFE "multiply (ext.)" g5 esa,zarch
54 n RX_RRRD "AND" g5 esa,zarch
d4 nc SS_L0RDRD "AND" g5 esa,zarch
94 ni SI_URD "AND" g5 esa,zarch
ae sigp RS_RRRD "signal processor" g5 esa,zarch
5f sl RX_RRRD "subtract logical" g5 esa,zarch
8b sla RS_R0RD "shift left single" g5 esa,zarch
-8f slda RS_R0RD "shift left double (long)" g5 esa,zarch
-8d sldl RS_R0RD "shift left double logical (long)" g5 esa,zarch
+8f slda RS_RE0RD "shift left double (long)" g5 esa,zarch
+8d sldl RS_RE0RD "shift left double logical (long)" g5 esa,zarch
89 sll RS_R0RD "shift left single logical" g5 esa,zarch
1f slr RR_RR "subtract logical" g5 esa,zarch
fb sp SS_LLRDRD "subtract decimal" g5 esa,zarch
b245 sqer RRE_FF "square root (short)" g5 esa,zarch
1b sr RR_RR "subtract" g5 esa,zarch
8a sra RS_R0RD "shift right single" g5 esa,zarch
-8e srda RS_R0RD "shift right double (long)" g5 esa,zarch
-8c srdl RS_R0RD "shift right double logical (long)" g5 esa,zarch
+8e srda RS_RE0RD "shift right double (long)" g5 esa,zarch
+8c srdl RS_RE0RD "shift right double logical (long)" g5 esa,zarch
88 srl RS_R0RD "shift right single logical" g5 esa,zarch
f0 srp SS_LIRDRD "shift and round decimal" g5 esa,zarch
b25e srst RRE_RR "search string" g5 esa,zarch
0a svc RR_U0 "supervisor call" g5 esa,zarch
6f sw RX_FRRD "subtract unnormalized (long)" g5 esa,zarch
2f swr RR_FF "subtract unnormalized (long)" g5 esa,zarch
-37 sxr RR_FF "subtract normalized (ext.)" g5 esa,zarch
+37 sxr RR_FEFE "subtract normalized (ext.)" g5 esa,zarch
b24c tar RRE_AR "test access" g5 esa,zarch
b22c tb RRE_0R "test block" g5 esa,zarch
91 tm SI_URD "test under mask" g5 esa,zarch
a70e chi RI_RI "compare halfword immediate" g5 esa,zarch
a9 clcle RS_RRRD "compare logical long extended" g5 esa,zarch
a708 lhi RI_RI "load halfword immediate" g5 esa,zarch
-a8 mvcle RS_RRRD "move long extended" g5 esa,zarch
+a8 mvcle RS_RERERD "move long extended" g5 esa,zarch
a70c mhi RI_RI "multiply halfword immediate" g5 esa,zarch
b252 msr RRE_RR "multiply single" g5 esa,zarch
71 ms RX_RRRD "multiply single" g5 esa,zarch
47f0 b RX_0RRD "unconditional branch" g5 esa,zarch
a704 j*8 RI_0P "conditional jump" g5 esa,zarch
a7f4 j RI_0P "unconditional jump" g5 esa,zarch
-b34a axbr RRE_FF "add extended bfp" g5 esa,zarch
+b34a axbr RRE_FEFE "add extended bfp" g5 esa,zarch
b31a adbr RRE_FF "add long bfp" g5 esa,zarch
ed000000001a adb RXE_FRRD "add long bfp" g5 esa,zarch
b30a aebr RRE_FF "add short bfp" g5 esa,zarch
ed000000000a aeb RXE_FRRD "add short bfp" g5 esa,zarch
-b349 cxbr RRE_FF "compare extended bfp" g5 esa,zarch
+b349 cxbr RRE_FEFE "compare extended bfp" g5 esa,zarch
b319 cdbr RRE_FF "compare long bfp" g5 esa,zarch
ed0000000019 cdb RXE_FRRD "compare long bfp" g5 esa,zarch
b309 cebr RRE_FF "compare short bfp" g5 esa,zarch
ed0000000018 kdb RXE_FRRD "compare and signal long bfp" g5 esa,zarch
b308 kebr RRE_FF "compare and signal short bfp" g5 esa,zarch
ed0000000008 keb RXE_FRRD "compare and signal short bfp" g5 esa,zarch
-b396 cxfbr RRE_FR "convert from fixed 32 to extended bfp" g5 esa,zarch
+b396 cxfbr RRE_FER "convert from fixed 32 to extended bfp" g5 esa,zarch
b395 cdfbr RRE_FR "convert from fixed 32 to long bfp" g5 esa,zarch
b394 cefbr RRE_FR "convert from fixed 32 to short bfp" g5 esa,zarch
-b39a cfxbr RRF_U0RF "convert to fixed extended bfp to 32" g5 esa,zarch
+b39a cfxbr RRF_U0RFE "convert to fixed extended bfp to 32" g5 esa,zarch
b399 cfdbr RRF_U0RF "convert to fixed long bfp to 32" g5 esa,zarch
b398 cfebr RRF_U0RF "convert to fixed short bfp to 32" g5 esa,zarch
-b34d dxbr RRE_FF "divide extended bfp" g5 esa,zarch
+b34d dxbr RRE_FEFE "divide extended bfp" g5 esa,zarch
b31d ddbr RRE_FF "divide long bfp" g5 esa,zarch
ed000000001d ddb RXE_FRRD "divide long bfp" g5 esa,zarch
b30d debr RRE_FF "divide short bfp" g5 esa,zarch
b35b didbr RRF_FUFF "divide to integer long bfp" g5 esa,zarch
b353 diebr RRF_FUFF "divide to integer short bfp" g5 esa,zarch
b38c efpc RRE_RR_OPT "extract fpc" g5 esa,zarch
-b342 ltxbr RRE_FF "load and test extended bfp" g5 esa,zarch
+b342 ltxbr RRE_FEFE "load and test extended bfp" g5 esa,zarch
b312 ltdbr RRE_FF "load and test long bfp" g5 esa,zarch
b302 ltebr RRE_FF "load and test short bfp" g5 esa,zarch
-b343 lcxbr RRE_FF "load complement extended bfp" g5 esa,zarch
+b343 lcxbr RRE_FEFE "load complement extended bfp" g5 esa,zarch
b313 lcdbr RRE_FF "load complement long bfp" g5 esa,zarch
b303 lcebr RRE_FF "load complement short bfp" g5 esa,zarch
-b347 fixbr RRF_U0FF "load fp integer extended bfp" g5 esa,zarch
+b347 fixbr RRF_U0FEFE "load fp integer extended bfp" g5 esa,zarch
b35f fidbr RRF_U0FF "load fp integer long bfp" g5 esa,zarch
b357 fiebr RRF_U0FF "load fp integer short bfp" g5 esa,zarch
b29d lfpc S_RD "load fpc" g5 esa,zarch
-b305 lxdbr RRE_FF "load lengthened long to extended bfp" g5 esa,zarch
-ed0000000005 lxdb RXE_FRRD "load lengthened long to extended bfp" g5 esa,zarch
-b306 lxebr RRE_FF "load lengthened short to extended bfp" g5 esa,zarch
-ed0000000006 lxeb RXE_FRRD "load lengthened short to extended bfp" g5 esa,zarch
+b305 lxdbr RRE_FEF "load lengthened long to extended bfp" g5 esa,zarch
+ed0000000005 lxdb RXE_FERRD "load lengthened long to extended bfp" g5 esa,zarch
+b306 lxebr RRE_FEF "load lengthened short to extended bfp" g5 esa,zarch
+ed0000000006 lxeb RXE_FERRD "load lengthened short to extended bfp" g5 esa,zarch
b304 ldebr RRE_FF "load lengthened short to long bfp" g5 esa,zarch
ed0000000004 ldeb RXE_FRRD "load lengthened short to long bfp" g5 esa,zarch
-b341 lnxbr RRE_FF "load negative extended bfp" g5 esa,zarch
+b341 lnxbr RRE_FEFE "load negative extended bfp" g5 esa,zarch
b311 lndbr RRE_FF "load negative long bfp" g5 esa,zarch
b301 lnebr RRE_FF "load negative short bfp" g5 esa,zarch
-b340 lpxbr RRE_FF "load positive extended bfp" g5 esa,zarch
+b340 lpxbr RRE_FEFE "load positive extended bfp" g5 esa,zarch
b310 lpdbr RRE_FF "load positive long bfp" g5 esa,zarch
b300 lpebr RRE_FF "load positive short bfp" g5 esa,zarch
-b345 ldxbr RRE_FF "load rounded extended to long bfp" g5 esa,zarch
-b346 lexbr RRE_FF "load rounded extended to short bfp" g5 esa,zarch
+b345 ldxbr RRE_FEFE "load rounded extended to long bfp" g5 esa,zarch
+b346 lexbr RRE_FEFE "load rounded extended to short bfp" g5 esa,zarch
b344 ledbr RRE_FF "load rounded long to short bfp" g5 esa,zarch
-b34c mxbr RRE_FF "multiply extended bfp" g5 esa,zarch
+b34c mxbr RRE_FEFE "multiply extended bfp" g5 esa,zarch
b31c mdbr RRE_FF "multiply long bfp" g5 esa,zarch
ed000000001c mdb RXE_FRRD "multiply long bfp" g5 esa,zarch
-b307 mxdbr RRE_FF "multiply long to extended bfp" g5 esa,zarch
-ed0000000007 mxdb RXE_FRRD "multiply long to extended bfp" g5 esa,zarch
+b307 mxdbr RRE_FEF "multiply long to extended bfp" g5 esa,zarch
+ed0000000007 mxdb RXE_FERRD "multiply long to extended bfp" g5 esa,zarch
b317 meebr RRE_FF "multiply short bfp" g5 esa,zarch
ed0000000017 meeb RXE_FRRD "multiply short bfp" g5 esa,zarch
b30c mdebr RRE_FF "multiply short to long bfp" g5 esa,zarch
ed000000000f mseb RXF_FRRDF "multiply and subtract short bfp" g5 esa,zarch
b384 sfpc RRE_RR_OPT "set fpc" g5 esa,zarch
b299 srnm S_RD "set rounding mode" g5 esa,zarch
-b316 sqxbr RRE_FF "square root extended bfp" g5 esa,zarch
+b316 sqxbr RRE_FEFE "square root extended bfp" g5 esa,zarch
b315 sqdbr RRE_FF "square root long bfp" g5 esa,zarch
ed0000000015 sqdb RXE_FRRD "square root long bfp" g5 esa,zarch
b314 sqebr RRE_FF "square root short bfp" g5 esa,zarch
ed0000000014 sqeb RXE_FRRD "square root short bfp" g5 esa,zarch
b29c stfpc S_RD "store fpc" g5 esa,zarch
-b34b sxbr RRE_FF "subtract extended bfp" g5 esa,zarch
+b34b sxbr RRE_FEFE "subtract extended bfp" g5 esa,zarch
b31b sdbr RRE_FF "subtract long bfp" g5 esa,zarch
ed000000001b sdb RXE_FRRD "subtract long bfp" g5 esa,zarch
b30b sebr RRE_FF "subtract short bfp" g5 esa,zarch
ed000000000b seb RXE_FRRD "subtract short bfp" g5 esa,zarch
-ed0000000012 tcxb RXE_FRRD "test data class extended bfp" g5 esa,zarch
+ed0000000012 tcxb RXE_FERRD "test data class extended bfp" g5 esa,zarch
ed0000000011 tcdb RXE_FRRD "test data class long bfp" g5 esa,zarch
ed0000000010 tceb RXE_FRRD "test data class short bfp" g5 esa,zarch
b274 siga S_RD "signal adapter" g5 esa,zarch
-b2a6 cuutf RRE_RR "convert unicode to utf-8" g5 esa,zarch
+b2a6 cuutf RRE_RERE "convert unicode to utf-8" g5 esa,zarch
b2a7 cutfu RRE_RR "convert utf-8 to unicode" g5 esa,zarch
ee plo SS_RRRDRD2 "perform locked operation" g5 esa,zarch
b25a bsa RRE_RR "branch and set authority" g5 esa,zarch
01ff trap2 E "trap" g5 esa,zarch
b2ff trap4 S_RD "trap4" g5 esa,zarch
b278 stcke S_RD "store clock extended" g5 esa,zarch
-b2a5 tre RRE_RR "translate extended" g5 esa,zarch
-eb000000008e mvclu RSE_RRRD "move long unicode" g5 esa,zarch
+b2a5 tre RRE_RER "translate extended" g5 esa,zarch
+eb000000008e mvclu RSE_RERERD "move long unicode" g5 esa,zarch
e9 pka SS_L2RDRD "pack ascii" g5 esa,zarch
e1 pku SS_L0RDRD "pack unicode" g5 esa,zarch
-b993 troo RRE_RR "translate one to one" g5 esa,zarch
-b992 trot RRE_RR "translate one to two" g5 esa,zarch
-b991 trto RRE_RR "translate two to one" g5 esa,zarch
-b990 trtt RRE_RR "translate two to two" g5 esa,zarch
+b993 troo RRE_RER "translate one to one" g5 esa,zarch
+b992 trot RRE_RER "translate one to two" g5 esa,zarch
+b991 trto RRE_RER "translate two to one" g5 esa,zarch
+b990 trtt RRE_RER "translate two to two" g5 esa,zarch
ea unpka SS_L0RDRD "unpack ascii" g5 esa,zarch
e2 unpku SS_L0RDRD "unpack unicode" g5 esa,zarch
b358 thder RRE_FF "convert short bfp to long hfp" g5 esa,zarch
b351 tbdr RRF_U0FF "convert long hfp to long bfp" g5 esa,zarch
b374 lzer RRE_F0 "load short zero" g5 esa,zarch
b375 lzdr RRE_F0 "load long zero" g5 esa,zarch
-b376 lzxr RRE_F0 "load extended zero" g5 esa,zarch
+b376 lzxr RRE_FE0 "load extended zero" g5 esa,zarch
# Here are the new esame instructions:
b946 bctgr RRE_RR "branch on count 64" z900 zarch
b900 lpgr RRE_RR "load positive 64" z900 zarch
eb0000000025 stctg RSE_CCRD "store control 64" z900 zarch
eb000000002f lctlg RSE_CCRD "load control 64" z900 zarch
eb0000000030 csg RSE_RRRD "compare and swap 64" z900 zarch
-eb000000003e cdsg RSE_RRRD "compare double and swap 64" z900 zarch
+eb000000003e cdsg RSE_RERERD "compare double and swap 64" z900 zarch
eb0000000020 clmh RSE_RURD "compare logical characters under mask high" z900 zarch
eb000000002c stcmh RSE_RURD "store characters under mask high" z900 zarch
eb0000000080 icmh RSE_RURD "insert characters under mask high" z900 zarch
b91c msgfr RRE_RR "multiply single 64<32" z900 zarch
b3a4 cegbr RRE_FR "convert from fixed 64 to short bfp" z900 zarch
b3a5 cdgbr RRE_FR "convert from fixed 64 to long bfp" z900 zarch
-b3a6 cxgbr RRE_FR "convert from fixed 64 to extended bfp" z900 zarch
+b3a6 cxgbr RRE_FER "convert from fixed 64 to extended bfp" z900 zarch
b3a8 cgebr RRF_U0RF "convert to fixed short bfd to 64" z900 zarch
b3a9 cgdbr RRF_U0RF "convert to fixed long bfp to 64" z900 zarch
-b3aa cgxbr RRF_U0RF "convert to fixed extended bfp to 64" z900 zarch
+b3aa cgxbr RRF_U0RFE "convert to fixed extended bfp to 64" z900 zarch
b3c4 cegr RRE_FR "convert from fixed 64 to short hfp" z900 zarch
b3c5 cdgr RRE_FR "convert from fixed 64 to long hfp" z900 zarch
-b3c6 cxgr RRE_FR "convert from fixed 64 to extended hfp" z900 zarch
+b3c6 cxgr RRE_FER "convert from fixed 64 to extended hfp" z900 zarch
b3c8 cger RRF_U0RF "convert to fixed short hfp to 64" z900 zarch
b3c9 cgdr RRF_U0RF "convert to fixed long hfp to 64" z900 zarch
-b3ca cgxr RRF_U0RF "convert to fixed extended hfp to 64" z900 zarch
+b3ca cgxr RRF_U0RFE "convert to fixed extended hfp to 64" z900 zarch
010b tam E "test addressing mode" z900 esa,zarch
010c sam24 E "set addressing mode 24" z900 esa,zarch
010d sam31 E "set addressing mode 31" z900 esa,zarch
a50f llill RI_RU "load logical immediate low low" z900 zarch
b2b1 stfl S_RD "store facility list" z900 esa,zarch
b2b2 lpswe S_RD "load psw extended" z900 zarch
-b90d dsgr RRE_RR "divide single 64" z900 zarch
+b90d dsgr RRE_RER "divide single 64" z900 zarch
b90f lrvgr RRE_RR "load reversed 64" z900 zarch
b916 llgfr RRE_RR "load logical 64<32" z900 zarch
b917 llgtr RRE_RR "load logical thirty one bits" z900 zarch
-b91d dsgfr RRE_RR "divide single 64<32" z900 zarch
+b91d dsgfr RRE_RER "divide single 64<32" z900 zarch
b91f lrvr RRE_RR "load reversed 32" z900 esa,zarch
-b986 mlgr RRE_RR "multiply logical 64" z900 zarch
-b987 dlgr RRE_RR "divide logical 64" z900 zarch
+b986 mlgr RRE_RER "multiply logical 64" z900 zarch
+b987 dlgr RRE_RER "divide logical 64" z900 zarch
b988 alcgr RRE_RR "add logical with carry 64" z900 zarch
b989 slbgr RRE_RR "subtract logical with borrow 64" z900 zarch
b98d epsw RRE_RR "extract psw" z900 esa,zarch
-b996 mlr RRE_RR "multiply logical 32" z900 esa,zarch
-b997 dlr RRE_RR "divide logical 32" z900 esa,zarch
+b996 mlr RRE_RER "multiply logical 32" z900 esa,zarch
+b997 dlr RRE_RER "divide logical 32" z900 esa,zarch
b998 alcr RRE_RR "add logical with carry 32" z900 esa,zarch
b999 slbr RRE_RR "subtract logical with borrow 32" z900 esa,zarch
b99d esea RRE_R0 "extract and set extended authority" z900 zarch
c000 larl RIL_RP "load address relative long" z900 esa,zarch
-e3000000000d dsg RXE_RRRD "divide single 64" z900 zarch
+e3000000000d dsg RXE_RERRD "divide single 64" z900 zarch
e3000000000f lrvg RXE_RRRD "load reversed 64" z900 zarch
e30000000016 llgf RXE_RRRD "load logical 64<32" z900 zarch
e30000000017 llgt RXE_RRRD "load logical thirty one bits" z900 zarch
-e3000000001d dsgf RXE_RRRD "divide single 64<32" z900 zarch
+e3000000001d dsgf RXE_RERRD "divide single 64<32" z900 zarch
e3000000001e lrv RXE_RRRD "load reversed 32" z900 esa,zarch
e3000000001f lrvh RXE_RRRD "load reversed 16" z900 esa,zarch
e3000000002f strvg RXE_RRRD "store reversed 64" z900 zarch
e3000000003e strv RXE_RRRD "store reversed 32" z900 esa,zarch
e3000000003f strvh RXE_RRRD "store reversed 64" z900 esa,zarch
-e30000000086 mlg RXE_RRRD "multiply logical 64" z900 zarch
-e30000000087 dlg RXE_RRRD "divide logical 64" z900 zarch
+e30000000086 mlg RXE_RERRD "multiply logical 64" z900 zarch
+e30000000087 dlg RXE_RERRD "divide logical 64" z900 zarch
e30000000088 alcg RXE_RRRD "add logical with carry 64" z900 zarch
e30000000089 slbg RXE_RRRD "subtract logical with borrow 64" z900 zarch
-e3000000008e stpq RXE_RRRD "store pair to quadword" z900 zarch
+e3000000008e stpq RXE_RERRD "store pair to quadword" z900 zarch
-e3000000008f lpq RXE_RRRD "load pair from quadword" z900 zarch
-e30000000096 ml RXE_RRRD "multiply logical 32" z900 esa,zarch
-e30000000097 dl RXE_RRRD "divide logical 32" z900 esa,zarch
+e3000000008f lpq RXE_RERRD "load pair from quadword" z900 zarch
+e30000000096 ml RXE_RERRD "multiply logical 32" z900 esa,zarch
+e30000000097 dl RXE_RERRD "divide logical 32" z900 esa,zarch
e30000000098 alc RXE_RRRD "add logical with carry 32" z900 esa,zarch
e30000000099 slb RXE_RRRD "subtract logical with borrow 32" z900 esa,zarch
e30000000090 llgc RXE_RRRD "load logical character" z900 zarch
eb000000001c rllg RSE_RRRD "rotate left single logical 64" z900 zarch
eb000000001d rll RSE_RRRD "rotate left single logical 32" z900 esa,zarch
b369 cxr RRE_FF "compare extended hfp" g5 esa,zarch
-b3b6 cxfr RRE_FR "convert from fixed 32 to extended hfp" g5 esa,zarch
+b3b6 cxfr RRE_FER "convert from fixed 32 to extended hfp" g5 esa,zarch
b3b5 cdfr RRE_FR "convert from fixed 32 to long hfp" g5 esa,zarch
b3b4 cefr RRE_FR "convert from fixed 32 to short hfp" g5 esa,zarch
-b3ba cfxr RRF_U0RF "convert to fixed extended hfp to 32" g5 esa,zarch
+b3ba cfxr RRF_U0RFE "convert to fixed extended hfp to 32" g5 esa,zarch
b3b9 cfdr RRF_U0RF "convert to fixed long hfp to 32" g5 esa,zarch
b3b8 cfer RRF_U0RF "convert to fixed short hfp to 32" g5 esa,zarch
-b362 ltxr RRE_FF "load and test extended hfp" g5 esa,zarch
-b363 lcxr RRE_FF "load complement extended hfp" g5 esa,zarch
-b367 fixr RRE_FF "load fp integer extended hfp" g5 esa,zarch
+b362 ltxr RRE_FEFE "load and test extended hfp" g5 esa,zarch
+b363 lcxr RRE_FEFE "load complement extended hfp" g5 esa,zarch
+b367 fixr RRE_FEFE "load fp integer extended hfp" g5 esa,zarch
b37f fidr RRE_FF "load fp integer long hfp" g5 esa,zarch
b377 fier RRE_FF "load fp integer short hfp" g5 esa,zarch
-b325 lxdr RRE_FF "load lengthened long to extended hfp" g5 esa,zarch
-ed0000000025 lxd RXE_FRRD "load lengthened long to extended hfp" g5 esa,zarch
-b326 lxer RRE_FF "load lengthened short to extended hfp" g5 esa,zarch
-ed0000000026 lxe RXE_FRRD "load lengthened short to extended hfp" g5 esa,zarch
+b325 lxdr RRE_FEF "load lengthened long to extended hfp" g5 esa,zarch
+ed0000000025 lxd RXE_FERRD "load lengthened long to extended hfp" g5 esa,zarch
+b326 lxer RRE_FEF "load lengthened short to extended hfp" g5 esa,zarch
+ed0000000026 lxe RXE_FERRD "load lengthened short to extended hfp" g5 esa,zarch
b324 lder RRE_FF "load lengthened short to long hfp" g5 esa,zarch
ed0000000024 lde RXE_FRRD "load lengthened short to long hfp" g5 esa,zarch
-b361 lnxr RRE_FF "load negative long hfp" g5 esa,zarch
-b360 lpxr RRE_FF "load positive long hfp" g5 esa,zarch
-b366 lexr RRE_FF "load rounded extended to short hfp" g5 esa,zarch
+b361 lnxr RRE_FEFE "load negative extended hfp" g5 esa,zarch
+b360 lpxr RRE_FEFE "load positive extended hfp" g5 esa,zarch
+b366 lexr RRE_FFE "load rounded extended to short hfp" g5 esa,zarch
b337 meer RRE_FF "multiply short hfp" g5 esa,zarch
ed0000000037 mee RXE_FRRD "multiply short hfp" g5 esa,zarch
-b336 sqxr RRE_FF "square root extended hfp" g5 esa,zarch
+b336 sqxr RRE_FEFE "square root extended hfp" g5 esa,zarch
ed0000000034 sqe RXE_FRRD "square root short hfp" g5 esa,zarch
ed0000000035 sqd RXE_FRRD "square root long hfp" g5 esa,zarch
-b263 cmpsc RRE_RR "compression call" g5 esa,zarch
+b263 cmpsc RRE_RERE "compression call" g5 esa,zarch
eb00000000c0 tp RSL_R0RD "test decimal" g5 esa,zarch
-b365 lxr RRE_FF "load extended fp" g5 esa,zarch
+b365 lxr RRE_FEFE "load extended fp" g5 esa,zarch
b22e pgin RRE_RR "page in" g5 esa,zarch
b22f pgout RRE_RR "page out" g5 esa,zarch
b276 xsch S_00 "cancel subchannel" g5 esa,zarch
e30000000054 ny RXY_RRRD "and with long offset" z990 zarch
e30000000059 cy RXY_RRRD "compare with long offset" z990 zarch
eb0000000014 csy RSY_RRRD "compare and swap with long offset" z990 zarch
-eb0000000031 cdsy RSY_RRRD "compare double and swap with long offset" z990 zarch
+eb0000000031 cdsy RSY_RERERD "compare double and swap with long offset" z990 zarch
e30000000079 chy RXY_RRRD "compare halfword with long offset" z990 zarch
e30000000055 cly RXY_RRRD "compare logical with long offset" z990 zarch
eb0000000055 cliy SIY_URD "compare logical immediate with long offset" z990 zarch
e3000000000a alg RXY_RRRD "add logical with long offset 64" z990 zarch
e3000000000b slg RXY_RRRD "subtract logical with long offset 64" z990 zarch
e3000000000c msg RXY_RRRD "multiply single with long offset 64" z990 zarch
-e3000000000d dsg RXY_RRRD "divide single 64" z990 zarch
+e3000000000d dsg RXY_RERRD "divide single 64" z990 zarch
e3000000000e cvbg RXY_RRRD "convert to binary with long offset 64" z990 zarch
e3000000000f lrvg RXY_RRRD "load reversed 64" z990 zarch
e30000000014 lgf RXY_RRRD "load 64<32" z990 zarch
e3000000001a algf RXY_RRRD "add logical with long offset 64<32" z990 zarch
e3000000001b slgf RXY_RRRD "subtract logical with long offset 64<32" z990 zarch
e3000000001c msgf RXY_RRRD "multiply single with long offset 64<32" z990 zarch
-e3000000001d dsgf RXY_RRRD "divide single 64<32" z990 zarch
+e3000000001d dsgf RXY_RERRD "divide single 64<32" z990 zarch
e3000000001e lrv RXY_RRRD "load reversed 32" z990 esa,zarch
e3000000001f lrvh RXY_RRRD "load reversed 16" z990 esa,zarch
e30000000020 cg RXY_RRRD "compare with long offset 64" z990 zarch
e30000000080 ng RXY_RRRD "and with long offset 64" z990 zarch
e30000000081 og RXY_RRRD "or with long offset 64" z990 zarch
e30000000082 xg RXY_RRRD "exclusive or with long offset 64" z990 zarch
-e30000000086 mlg RXY_RRRD "multiply logical 64" z990 zarch
-e30000000087 dlg RXY_RRRD "divide logical 64" z990 zarch
+e30000000086 mlg RXY_RERRD "multiply logical 64" z990 zarch
+e30000000087 dlg RXY_RERRD "divide logical 64" z990 zarch
e30000000088 alcg RXY_RRRD "add logical with carry 64" z990 zarch
e30000000089 slbg RXY_RRRD "subtract logical with borrow 64" z990 zarch
-e3000000008e stpq RXY_RRRD "store pair to quadword" z990 zarch
+e3000000008e stpq RXY_RERRD "store pair to quadword" z990 zarch
-e3000000008f lpq RXY_RRRD "load pair from quadword" z990 zarch
+e3000000008f lpq RXY_RERRD "load pair from quadword" z990 zarch
e30000000090 llgc RXY_RRRD "load logical character" z990 zarch
e30000000091 llgh RXY_RRRD "load logical halfword" z990 zarch
-e30000000096 ml RXY_RRRD "multiply logical 32" z990 esa,zarch
-e30000000097 dl RXY_RRRD "divide logical 32" z990 esa,zarch
+e30000000096 ml RXY_RERRD "multiply logical 32" z990 esa,zarch
+e30000000097 dl RXY_RERRD "divide logical 32" z990 esa,zarch
e30000000098 alc RXY_RRRD "add logical with carry 32" z990 esa,zarch
e30000000099 slb RXY_RRRD "subtract logical with borrow 32" z990 esa,zarch
eb0000000004 lmg RSY_RRRD "load multiple with long offset 64" z990 zarch
eb000000002c stcmh RSY_RURD "store characters under mask high with long offset" z990 zarch
eb000000002f lctlg RSY_CCRD "load control 64" z990 zarch
eb0000000030 csg RSY_RRRD "compare and swap with long offset 64" z990 zarch
-eb000000003e cdsg RSY_RRRD "compare double and swap with long offset 64" z990 zarch
+eb000000003e cdsg RSY_RERERD "compare double and swap with long offset 64" z990 zarch
eb0000000044 bxhg RSY_RRRD "branch on index high 64" z990 zarch
eb0000000045 bxleg RSY_RRRD "branch on index low or equal 64" z990 zarch
eb0000000080 icmh RSY_RURD "insert characters under mask high with long offset" z990 zarch
-eb000000008e mvclu RSY_RRRD "move long unicode" z990 esa,zarch
+eb000000008e mvclu RSY_RERERD "move long unicode" z990 esa,zarch
-eb000000008f clclu RSY_RRRD "compare logical long unicode with long offset" z990 esa,zarch
+eb000000008f clclu RSY_RERERD "compare logical long unicode with long offset" z990 esa,zarch
eb0000000096 lmh RSY_RRRD "load multiple high" z990 zarch
# new z990 instructions
# z9-109 conditional sske facility, sske instruction entered twice
b22b sske RRF_M0RR "set storage key extended" z9-109 zarch
# z9-109 etf2-enhancement facility, instructions entered twice
-b993 troo RRF_M0RR "translate one to one" z9-109 esa,zarch
-b992 trot RRF_M0RR "translate one to two" z9-109 esa,zarch
-b991 trto RRF_M0RR "translate two to one" z9-109 esa,zarch
-b990 trtt RRF_M0RR "translate two to two" z9-109 esa,zarch
+b993 troo RRF_M0RERE "translate one to one" z9-109 esa,zarch
+b992 trot RRF_M0RERE "translate one to two" z9-109 esa,zarch
+b991 trto RRF_M0RERE "translate two to one" z9-109 esa,zarch
+b990 trtt RRF_M0RERE "translate two to two" z9-109 esa,zarch
# z9-109 etf3-enhancement facility, some instructions entered twice
-b9b1 cu24 RRF_M0RR "convert utf-16 to utf-32" z9-109 zarch
-b2a6 cu21 RRF_M0RR "convert utf-16 to utf-8" z9-109 zarch
-b2a6 cuutf RRF_M0RR "convert unicode to utf-8" z9-109 zarch
-b9b3 cu42 RRE_RR "convert utf-32 to utf-16" z9-109 zarch
-b9b2 cu41 RRE_RR "convert utf-32 to utf-8" z9-109 zarch
-b2a7 cu12 RRF_M0RR "convert utf-8 to utf-16" z9-109 zarch
-b2a7 cutfu RRF_M0RR "convert utf-8 to unicode" z9-109 zarch
-b9b0 cu14 RRF_M0RR "convert utf-8 to utf-32" z9-109 zarch
+b9b1 cu24 RRF_M0RERE "convert utf-16 to utf-32" z9-109 zarch
+b2a6 cu21 RRF_M0RERE "convert utf-16 to utf-8" z9-109 zarch
+b2a6 cuutf RRF_M0RERE "convert unicode to utf-8" z9-109 zarch
+b9b3 cu42 RRE_RERE "convert utf-32 to utf-16" z9-109 zarch
+b9b2 cu41 RRE_RERE "convert utf-32 to utf-8" z9-109 zarch
+b2a7 cu12 RRF_M0RERE "convert utf-8 to utf-16" z9-109 zarch
+b2a7 cutfu RRF_M0RERE "convert utf-8 to unicode" z9-109 zarch
+b9b0 cu14 RRF_M0RERE "convert utf-8 to utf-32" z9-109 zarch
b9eb srstu RRE_RR "search string unicode" z9-109 zarch
-d0 trtr SS_L0RDRD "tranlate and test reverse" z9-109 zarch
+d0 trtr SS_L0RDRD "translate and test reverse" z9-109 zarch
# z9-109 unnormalized hfp multiply & multiply and add
-b33b myr RRF_F0FF "multiply unnormalized long hfp" z9-109 zarch
+b33b myr RRF_FE0FF "multiply unnormalized long hfp" z9-109 zarch
b33d myhr RRF_F0FF "multiply unnormalized long hfp high" z9-109 zarch
b339 mylr RRF_F0FF "multiply unnormalized long hfp low" z9-109 zarch
-ed000000003b my RXF_FRRDF "multiply unnormalized long hfp" z9-109 zarch
+ed000000003b my RXF_FRRDFE "multiply unnormalized long hfp" z9-109 zarch
ed000000003d myh RXF_FRRDF "multiply unnormalized long hfp high" z9-109 zarch
ed0000000039 myl RXF_FRRDF "multiply unnormalized long hfp low" z9-109 zarch
-b33a mayr RRF_F0FF "multiply and add unnormalized long hfp" z9-109 zarch
+b33a mayr RRF_FE0FF "multiply and add unnormalized long hfp" z9-109 zarch
b3c1 ldgr RRE_FR "load fpr from gr" z9-ec zarch
b3cd lgdr RRE_RF "load gr from fpr" z9-ec zarch
b3d2 adtr RRR_F0FF "add long dfp" z9-ec zarch
-b3da axtr RRR_F0FF "add extended dfp" z9-ec zarch
+b3da axtr RRR_FE0FEFE "add extended dfp" z9-ec zarch
b3e4 cdtr RRE_FF "compare long dfp" z9-ec zarch
-b3ec cxtr RRE_FF "compare extended dfp" z9-ec zarch
+b3ec cxtr RRE_FEFE "compare extended dfp" z9-ec zarch
b3e0 kdtr RRE_FF "compare and signal long dfp" z9-ec zarch
-b3e8 kxtr RRE_FF "compare and signal extended dfp" z9-ec zarch
+b3e8 kxtr RRE_FEFE "compare and signal extended dfp" z9-ec zarch
b3f4 cedtr RRE_FF "compare exponent long dfp" z9-ec zarch
-b3fc cextr RRE_FF "compare exponent extended dfp" z9-ec zarch
+b3fc cextr RRE_FEFE "compare exponent extended dfp" z9-ec zarch
b3f1 cdgtr RRE_FR "convert from fixed long dfp" z9-ec zarch
-b3f9 cxgtr RRE_FR "convert from fixed extended dfp" z9-ec zarch
+b3f9 cxgtr RRE_FER "convert from fixed extended dfp" z9-ec zarch
b3f3 cdstr RRE_FR "convert from signed bcd long dfp" z9-ec zarch
-b3fb cxstr RRE_FR "convert from signed bcd extended dfp" z9-ec zarch
+b3fb cxstr RRE_FER "convert from signed bcd extended dfp" z9-ec zarch
b3f2 cdutr RRE_FR "convert from unsigned bcd to long dfp" z9-ec zarch
-b3fa cxutr RRE_FR "convert from unsigned bcd to extended dfp" z9-ec zarch
+b3fa cxutr RRE_FER "convert from unsigned bcd to extended dfp" z9-ec zarch
b3e1 cgdtr RRF_U0RF "convert from long dfp to fixed" z9-ec zarch
-b3e9 cgxtr RRF_U0RF "convert from extended dfp to fixed" z9-ec zarch
+b3e9 cgxtr RRF_U0RFE "convert from extended dfp to fixed" z9-ec zarch
b3e3 csdtr RRE_RF "convert from long dfp to signed bcd" z9-ec zarch
-b3eb csxtr RRE_RF "convert from extended dfp to signed bcd" z9-ec zarch
+b3eb csxtr RRE_RFE "convert from extended dfp to signed bcd" z9-ec zarch
b3e2 cudtr RRE_RF "convert from long dfp to unsigned bcd" z9-ec zarch
-b3ea cuxtr RRE_RF "convert from extended dfp to unsigned bcd" z9-ec zarch
+b3ea cuxtr RRE_RFE "convert from extended dfp to unsigned bcd" z9-ec zarch
b3d1 ddtr RRR_F0FF "divide long dfp" z9-ec zarch
-b3d9 dxtr RRR_F0FF "divide extended dfp" z9-ec zarch
+b3d9 dxtr RRR_FE0FEFE "divide extended dfp" z9-ec zarch
b3e5 eedtr RRE_RF "extract biased exponent from long dfp" z9-ec zarch
-b3ed eextr RRE_RF "extract biased exponent from extended dfp" z9-ec zarch
+b3ed eextr RRE_RFE "extract biased exponent from extended dfp" z9-ec zarch
b3e7 esdtr RRE_RF "extract significance from long dfp" z9-ec zarch
-b3ef esxtr RRE_RF "extract significance from extended dfp" z9-ec zarch
+b3ef esxtr RRE_RFE "extract significance from extended dfp" z9-ec zarch
b3f6 iedtr RRF_F0FR "insert biased exponent long dfp" z9-ec zarch
-b3fe iextr RRF_F0FR "insert biased exponent extended dfp" z9-ec zarch
+b3fe iextr RRF_FE0FER "insert biased exponent extended dfp" z9-ec zarch
b3d6 ltdtr RRE_FF "load and test long dfp" z9-ec zarch
-b3de ltxtr RRE_FF "load and test extended dfp" z9-ec zarch
+b3de ltxtr RRE_FEFE "load and test extended dfp" z9-ec zarch
b3d7 fidtr RRF_UUFF "load fp integer long dfp" z9-ec zarch
-b3df fixtr RRF_UUFF "load fp integer extended dfp" z9-ec zarch
+b3df fixtr RRF_UUFEFE "load fp integer extended dfp" z9-ec zarch
b2bd lfas S_RD "load fpd and signal" z9-ec zarch
b3d4 ldetr RRF_0UFF "load lengthened long dfp" z9-ec zarch
-b3dc lxdtr RRF_0UFF "load lengthened extended dfp" z9-ec zarch
+b3dc lxdtr RRF_0UFEF "load lengthened extended dfp" z9-ec zarch
b3d5 ledtr RRF_UUFF "load rounded long dfp" z9-ec zarch
-b3dd ldxtr RRF_UUFF "load rounded extended dfp" z9-ec zarch
+b3dd ldxtr RRF_UUFFE "load rounded extended dfp" z9-ec zarch
b3d0 mdtr RRR_F0FF "multiply long dfp" z9-ec zarch
-b3d8 mxtr RRR_F0FF "multiply extended dfp" z9-ec zarch
+b3d8 mxtr RRR_FE0FEFE "multiply extended dfp" z9-ec zarch
b3f5 qadtr RRF_FUFF "Quantize long dfp" z9-ec zarch
-b3fd qaxtr RRF_FUFF "Quantize extended dfp" z9-ec zarch
+b3fd qaxtr RRF_FEUFEFE "Quantize extended dfp" z9-ec zarch
b3f7 rrdtr RRF_FFRU "Reround long dfp" z9-ec zarch
-b3ff rrxtr RRF_FFRU "Reround extended dfp" z9-ec zarch
+b3ff rrxtr RRF_FEFERU "Reround extended dfp" z9-ec zarch
b2b9 srnmt S_RD "set rounding mode dfp" z9-ec zarch
b385 sfasr RRE_R0 "set fpc and signal" z9-ec zarch
ed0000000040 sldt RXF_FRRDF "shift coefficient left long dfp" z9-ec zarch
-ed0000000048 slxt RXF_FRRDF "shift coefficient left extended dfp" z9-ec zarch
+ed0000000048 slxt RXF_FERRDFE "shift coefficient left extended dfp" z9-ec zarch
ed0000000041 srdt RXF_FRRDF "shift coefficient right long dfp" z9-ec zarch
-ed0000000049 srxt RXF_FRRDF "shift coefficient right extended dfp" z9-ec zarch
+ed0000000049 srxt RXF_FERRDFE "shift coefficient right extended dfp" z9-ec zarch
b3d3 sdtr RRR_F0FF "subtract long dfp" z9-ec zarch
-b3db sxtr RRR_F0FF "subtract extended dfp" z9-ec zarch
+b3db sxtr RRR_FE0FEFE "subtract extended dfp" z9-ec zarch
ed0000000050 tdcet RXE_FRRD "test data class short dfp" z9-ec zarch
ed0000000054 tdcdt RXE_FRRD "test data class long dfp" z9-ec zarch
-ed0000000058 tdcxt RXE_FRRD "test data class extended dfp" z9-ec zarch
+ed0000000058 tdcxt RXE_FERRD "test data class extended dfp" z9-ec zarch
ed0000000051 tdget RXE_FRRD "test data group short dfp" z9-ec zarch
ed0000000055 tdgdt RXE_FRRD "test data group long dfp" z9-ec zarch
-ed0000000059 tdgxt RXE_FRRD "test data group extended dfp" z9-ec zarch
+ed0000000059 tdgxt RXE_FERRD "test data group extended dfp" z9-ec zarch
010a pfpo E "perform floating point operation" z9-ec zarch
c801 ectg SSF_RRDRD "extract cpu time" z9-ec zarch
c802 csst SSF_RRDRD "compare and swap and store" z9-ec zarch
e544 mvhhi SIL_RDI "move (16<16)" z10 zarch
e54c mvhi SIL_RDI "move (32<16)" z10 zarch
e548 mvghi SIL_RDI "move (64<16)" z10 zarch
-e3000000005c mfy RXY_RRRD "multiply" z10 zarch
+e3000000005c mfy RXY_RERRD "multiply" z10 zarch
e3000000007c mhy RXY_RRRD "multiply halfword" z10 zarch
c201 msfi RIL_RI "multiply single immediate (32)" z10 zarch
c200 msgfi RIL_RI "multiply single immediate (64)" z10 zarch
af00 mc SI_URD "monitor call" z10 zarch
b9a2 ptf RRE_R0 "perform topology function" z10 zarch
b9af pfmf RRE_RR "perform frame management function" z10 zarch
-b9bf trte RRF_M0RR "translate and test extended" z10 zarch
-b9bd trtre RRF_M0RR "translate and test reverse extended" z10 zarch
+b9bf trte RRF_M0RER "translate and test extended" z10 zarch
+b9bd trtre RRF_M0RER "translate and test reverse extended" z10 zarch
b9c8 ahhhr RRF_R0RR2 "add high high" z196 zarch
b9d8 ahhlr RRF_R0RR2 "add high low" z196 zarch
cc08 aih RIL_RI "add immediate high" z196 zarch
eb00000000e7 laxg RSY_RRRD "load and exclusive or 64 bit" z196 zarch
eb00000000f6 lao RSY_RRRD "load and or 32 bit" z196 zarch
eb00000000e6 laog RSY_RRRD "load and or 64 bit" z196 zarch
-c804 lpd SSF_RRDRD2 "load pair disjoint 32 bit" z196 zarch
-c805 lpdg SSF_RRDRD2 "load pair disjoint 64 bit" z196 zarch
+c804 lpd SSF_RERDRD2 "load pair disjoint 32 bit" z196 zarch
+c805 lpdg SSF_RERDRD2 "load pair disjoint 64 bit" z196 zarch
b9f2 locr RRF_U0RR "load on condition 32 bit" z196 zarch
b9f200000000 locr*16 RRF_00RR "load on condition 32 bit" z196 zarch
b9e2 locgr RRF_U0RR "load on condition 64 bit" z196 zarch
b9ae rrbm RRE_RR "reset reference bits multiple" z196 zarch
b394 cefbra RRF_UUFR "convert from 32 bit fixed to short bfp with rounding mode" z196 zarch
b395 cdfbra RRF_UUFR "convert from 32 bit fixed to long bfp with rounding mode" z196 zarch
-b396 cxfbra RRF_UUFR "convert from 32 bit fixed to extended bfp with rounding mode" z196 zarch
+b396 cxfbra RRF_UUFER "convert from 32 bit fixed to extended bfp with rounding mode" z196 zarch
b3a4 cegbra RRF_UUFR "convert from 64 bit fixed to short bfp with rounding mode" z196 zarch
b3a5 cdgbra RRF_UUFR "convert from 64 bit fixed to long bfp with rounding mode" z196 zarch
-b3a6 cxgbra RRF_UUFR "convert from 64 bit fixed to extended bfp with rounding mode" z196 zarch
+b3a6 cxgbra RRF_UUFER "convert from 64 bit fixed to extended bfp with rounding mode" z196 zarch
b390 celfbr RRF_UUFR "convert from 32 bit logical fixed to short bfp with rounding mode" z196 zarch
b391 cdlfbr RRF_UUFR "convert from 32 bit logical fixed to long bfp with rounding mode" z196 zarch
-b392 cxlfbr RRF_UUFR "convert from 32 bit logical fixed to extended bfp with rounding mode" z196 zarch
+b392 cxlfbr RRF_UUFER "convert from 32 bit logical fixed to extended bfp with rounding mode" z196 zarch
b3a0 celgbr RRF_UUFR "convert from 64 bit logical fixed to short bfp with rounding mode" z196 zarch
b3a1 cdlgbr RRF_UUFR "convert from 64 bit logical fixed to long bfp with rounding mode" z196 zarch
-b3a2 cxlgbr RRF_UUFR "convert from 64 bit logical fixed to extended bfp with rounding mode" z196 zarch
+b3a2 cxlgbr RRF_UUFER "convert from 64 bit logical fixed to extended bfp with rounding mode" z196 zarch
b398 cfebra RRF_UURF "convert to 32 bit fixed from short bfp with rounding mode" z196 zarch
b399 cfdbra RRF_UURF "convert to 32 bit fixed from long bfp with rounding mode" z196 zarch
-b39a cfxbra RRF_UURF "convert to 32 bit fixed from extended bfp with rounding mode" z196 zarch
+b39a cfxbra RRF_UURFE "convert to 32 bit fixed from extended bfp with rounding mode" z196 zarch
b3a8 cgebra RRF_UURF "convert to 64 bit fixed from short bfp with rounding mode" z196 zarch
b3a9 cgdbra RRF_UURF "convert to 64 bit fixed from long bfp with rounding mode" z196 zarch
-b3aa cgxbra RRF_UURF "convert to 64 bit fixed from extended bfp with rounding mode" z196 zarch
+b3aa cgxbra RRF_UURFE "convert to 64 bit fixed from extended bfp with rounding mode" z196 zarch
b39c clfebr RRF_UURF "convert to 32 bit fixed logical from short bfp with rounding mode" z196 zarch
b39d clfdbr RRF_UURF "convert to 32 bit fixed logical from long bfp with rounding mode" z196 zarch
-b39e clfxbr RRF_UURF "convert to 32 bit fixed logical from extended bfp with rounding mode" z196 zarch
+b39e clfxbr RRF_UURFE "convert to 32 bit fixed logical from extended bfp with rounding mode" z196 zarch
b3ac clgebr RRF_UURF "convert to 64 bit fixed logical from short bfp with rounding mode" z196 zarch
b3ad clgdbr RRF_UURF "convert to 64 bit fixed logical from long bfp with rounding mode" z196 zarch
-b3ae clgxbr RRF_UURF "convert to 64 bit fixed logical from extended bfp with rounding mode" z196 zarch
+b3ae clgxbr RRF_UURFE "convert to 64 bit fixed logical from extended bfp with rounding mode" z196 zarch
b357 fiebra RRF_UUFF "load fp integer short bfp with rounding mode" z196 zarch
b35f fidbra RRF_UUFF "load fp integer long bfp with rounding mode" z196 zarch
-b347 fixbra RRF_UUFF "load fp integer extended bfp with rounding mode" z196 zarch
+b347 fixbra RRF_UUFEFE "load fp integer extended bfp with rounding mode" z196 zarch
b344 ledbra RRF_UUFF "load rounded short/long bfp to short/long bfp with rounding mode" z196 zarch
-b345 ldxbra RRF_UUFF "load rounded long/extended bfp to long/extended bfp with rounding mode" z196 zarch
-b346 lexbra RRF_UUFF "load rounded short/extended bfp to short/extended bfp with rounding mode" z196 zarch
+b345 ldxbra RRF_UUFEFE "load rounded long/extended bfp to long/extended bfp with rounding mode" z196 zarch
+b346 lexbra RRF_UUFEFE "load rounded short/extended bfp to short/extended bfp with rounding mode" z196 zarch
b3d2 adtra RRF_FUFF2 "add long dfp with rounding mode" z196 zarch
-b3da axtra RRF_FUFF2 "add extended dfp with rounding mode" z196 zarch
+b3da axtra RRF_FEUFEFE2 "add extended dfp with rounding mode" z196 zarch
b3f1 cdgtra RRF_UUFR "convert from fixed long dfp with rounding mode" z196 zarch
b951 cdftr RRF_UUFR "convert from 32 bit fixed to long dfp with rounding mode" z196 zarch
-b959 cxftr RRF_UUFR "convert from 32 bit fixed to extended dfp with rounding mode" z196 zarch
-b3f9 cxgtra RRF_UUFR "convert from fixed extended dfp with rounding mode" z196 zarch
+b959 cxftr RRF_UUFER "convert from 32 bit fixed to extended dfp with rounding mode" z196 zarch
+b3f9 cxgtra RRF_UUFER "convert from fixed extended dfp with rounding mode" z196 zarch
b952 cdlgtr RRF_UUFR "convert from 64 bit fixed logical to long dfp with rounding mode" z196 zarch
-b95a cxlgtr RRF_UUFR "convert from 64 bit fixed logical to extended dfp with rounding mode" z196 zarch
+b95a cxlgtr RRF_UUFER "convert from 64 bit fixed logical to extended dfp with rounding mode" z196 zarch
b953 cdlftr RRF_UUFR "convert from 32 bit fixed logical to long dfp with rounding mode" z196 zarch
-b95b cxlftr RRF_UUFR "convert from 32 bit fixed logical to extended dfp with rounding mode" z196 zarch
+b95b cxlftr RRF_UUFER "convert from 32 bit fixed logical to extended dfp with rounding mode" z196 zarch
b3e1 cgdtra RRF_UURF "convert to 64 bit fixed from long dfp with rounding mode" z196 zarch
-b3e9 cgxtra RRF_UURF "convert to 64 bit fixed from extended dfp with rounding mode" z196 zarch
+b3e9 cgxtra RRF_UURFE "convert to 64 bit fixed from extended dfp with rounding mode" z196 zarch
b941 cfdtr RRF_UURF "convert to 32 bit fixed from long dfp source with rounding mode" z196 zarch
-b949 cfxtr RRF_UURF "convert to 32 bit fixed from extended dfp source with rounding mode" z196 zarch
+b949 cfxtr RRF_UURFE "convert to 32 bit fixed from extended dfp source with rounding mode" z196 zarch
b942 clgdtr RRF_UURF "convert to 64 bit fixed logical from long dfp with rounding mode" z196 zarch
-b94a clgxtr RRF_UURF "convert to 64 bit fixed logical from extended dfp with rounding mode" z196 zarch
+b94a clgxtr RRF_UURFE "convert to 64 bit fixed logical from extended dfp with rounding mode" z196 zarch
b943 clfdtr RRF_UURF "convert to 32 bit fixed logical from long dfp with rounding mode" z196 zarch
-b94b clfxtr RRF_UURF "convert to 32 bit fixed logical from extended dfp with rounding mode" z196 zarch
+b94b clfxtr RRF_UURFE "convert to 32 bit fixed logical from extended dfp with rounding mode" z196 zarch
b3d1 ddtra RRF_FUFF2 "divide long dfp with rounding mode" z196 zarch
-b3d9 dxtra RRF_FUFF2 "divide extended dfp with rounding mode" z196 zarch
+b3d9 dxtra RRF_FEUFEFE2 "divide extended dfp with rounding mode" z196 zarch
b3d0 mdtra RRF_FUFF2 "multiply long dfp with rounding mode" z196 zarch
-b3d8 mxtra RRF_FUFF2 "multiply extended dfp with rounding mode" z196 zarch
+b3d8 mxtra RRF_FEUFEFE2 "multiply extended dfp with rounding mode" z196 zarch
b3d3 sdtra RRF_FUFF2 "subtract long dfp with rounding mode" z196 zarch
-b3db sxtra RRF_FUFF2 "subtract extended dfp with rounding mode" z196 zarch
+b3db sxtra RRF_FEUFEFE2 "subtract extended dfp with rounding mode" z196 zarch
b2b8 srnmb S_RD "set 3 bit bfp rounding mode" z196 zarch